var/home/core/zuul-output/logs/kubelet.log
Nov 25 22:59:02 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 22:59:02 crc restorecon[4754]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 22:59:02 crc restorecon[4754]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc 
restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 22:59:02 crc restorecon[4754]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 22:59:02 crc restorecon[4754]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 22:59:02 crc 
restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:02 crc restorecon[4754]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:02 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 22:59:03 crc restorecon[4754]: 
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 22:59:03 crc restorecon[4754]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 22:59:03 crc restorecon[4754]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 25 22:59:04 crc kubenswrapper[5045]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 22:59:04 crc kubenswrapper[5045]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 25 22:59:04 crc kubenswrapper[5045]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 22:59:04 crc kubenswrapper[5045]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 22:59:04 crc kubenswrapper[5045]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 25 22:59:04 crc kubenswrapper[5045]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.109368 5045 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115064 5045 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115099 5045 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115112 5045 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115124 5045 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115134 5045 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115145 5045 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115157 5045 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115184 5045 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115194 5045 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115203 5045 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115213 5045 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115221 5045 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115230 5045 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115239 5045 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115248 5045 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115257 5045 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115265 5045 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115273 5045 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115282 5045 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115290 5045 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115299 5045 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115307 5045 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115316 5045 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115324 5045 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115333 5045 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115342 5045 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115351 5045 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115359 5045 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115367 5045 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115375 5045 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115384 5045 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115393 5045 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115401 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115409 5045 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115418 5045 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115427 5045 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115436 5045 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115445 5045 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115456 5045 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115465 5045 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115474 5045 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115482 5045 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115491 5045 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115500 5045 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115508 5045 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115518 5045 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115527 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115535 5045 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115576 5045 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115587 5045 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115598 5045 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115610 5045 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115619 5045 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115629 5045 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115637 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115646 5045 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115655 5045 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115664 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115673 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115682 5045 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115690 5045 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115699 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115707 5045 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115743 5045 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115752 5045 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115760 5045 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115771 5045 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115781 5045 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115790 5045 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115798 5045 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.115807 5045 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117589 5045 flags.go:64] FLAG: --address="0.0.0.0"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117617 5045 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117636 5045 flags.go:64] FLAG: --anonymous-auth="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117649 5045 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117697 5045 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117733 5045 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117748 5045 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117760 5045 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117770 5045 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117780 5045 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117791 5045 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117803 5045 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117813 5045 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117823 5045 flags.go:64] FLAG: --cgroup-root=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117834 5045 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117844 5045 flags.go:64] FLAG: --client-ca-file=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117854 5045 flags.go:64] FLAG: --cloud-config=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117863 5045 flags.go:64] FLAG: --cloud-provider=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117872 5045 flags.go:64] FLAG: --cluster-dns="[]"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117884 5045 flags.go:64] FLAG: --cluster-domain=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117894 5045 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117904 5045 flags.go:64] FLAG: --config-dir=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117914 5045 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117924 5045 flags.go:64] FLAG: --container-log-max-files="5"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117937 5045 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117947 5045 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117958 5045 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117968 5045 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117977 5045 flags.go:64] FLAG: --contention-profiling="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117987 5045 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.117997 5045 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118008 5045 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118018 5045 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118032 5045 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118045 5045 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118057 5045 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118069 5045 flags.go:64] FLAG: --enable-load-reader="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118082 5045 flags.go:64] FLAG: --enable-server="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118094 5045 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118112 5045 flags.go:64] FLAG: --event-burst="100"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118127 5045 flags.go:64] FLAG: --event-qps="50"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118139 5045 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118151 5045 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118162 5045 flags.go:64] FLAG: --eviction-hard=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118175 5045 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118186 5045 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118198 5045 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118209 5045 flags.go:64] FLAG: --eviction-soft=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118220 5045 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118230 5045 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118240 5045 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118250 5045 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118260 5045 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118270 5045 flags.go:64] FLAG: --fail-swap-on="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118279 5045 flags.go:64] FLAG: --feature-gates=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118291 5045 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118301 5045 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118314 5045 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118327 5045 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118340 5045 flags.go:64] FLAG: --healthz-port="10248"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118352 5045 flags.go:64] FLAG: --help="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118366 5045 flags.go:64] FLAG: --hostname-override=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118378 5045 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118391 5045 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118404 5045 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118417 5045 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118429 5045 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118441 5045 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118454 5045 flags.go:64] FLAG: --image-service-endpoint=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118466 5045 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118479 5045 flags.go:64] FLAG: --kube-api-burst="100"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118491 5045 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118504 5045 flags.go:64] FLAG: --kube-api-qps="50"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118516 5045 flags.go:64] FLAG: --kube-reserved=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118528 5045 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118539 5045 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118555 5045 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118566 5045 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118579 5045 flags.go:64] FLAG: --lock-file=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118591 5045 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118604 5045 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118616 5045 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118636 5045 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118648 5045 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118659 5045 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118669 5045 flags.go:64] FLAG: --logging-format="text"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118679 5045 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118690 5045 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118700 5045 flags.go:64] FLAG: --manifest-url=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118741 5045 flags.go:64] FLAG: --manifest-url-header=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118755 5045 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118766 5045 flags.go:64] FLAG: --max-open-files="1000000"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118777 5045 flags.go:64] FLAG: --max-pods="110"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118787 5045 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118798 5045 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118810 5045 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118820 5045 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118831 5045 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118842 5045 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118852 5045 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118875 5045 flags.go:64] FLAG: --node-status-max-images="50"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118886 5045 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118895 5045 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118906 5045 flags.go:64] FLAG: --pod-cidr=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118915 5045 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118929 5045 flags.go:64] FLAG: --pod-manifest-path=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118940 5045 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118950 5045 flags.go:64] FLAG: --pods-per-core="0"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118960 5045 flags.go:64] FLAG: --port="10250"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118971 5045 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118981 5045 flags.go:64] FLAG: --provider-id=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.118991 5045 flags.go:64] FLAG: --qos-reserved=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119003 5045 flags.go:64] FLAG: --read-only-port="10255"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119015 5045 flags.go:64] FLAG: --register-node="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119025 5045 flags.go:64] FLAG: --register-schedulable="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119034 5045 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119050 5045 flags.go:64] FLAG: --registry-burst="10"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119060 5045 flags.go:64] FLAG: --registry-qps="5"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119070 5045 flags.go:64] FLAG: --reserved-cpus=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119080 5045 flags.go:64] FLAG: --reserved-memory=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119093 5045 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119103 5045 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119113 5045 flags.go:64] FLAG: --rotate-certificates="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119123 5045 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119133 5045 flags.go:64] FLAG: --runonce="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119143 5045 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119154 5045 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119164 5045 flags.go:64] FLAG: --seccomp-default="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119174 5045 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119184 5045 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119194 5045 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119205 5045 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119215 5045 flags.go:64] FLAG: --storage-driver-password="root"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119226 5045 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119236 5045 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119246 5045 flags.go:64] FLAG: --storage-driver-user="root"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119256 5045 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119266 5045 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119276 5045 flags.go:64] FLAG: --system-cgroups=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119286 5045 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119300 5045 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119310 5045 flags.go:64] FLAG: --tls-cert-file=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119319 5045 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119332 5045 flags.go:64] FLAG: --tls-min-version=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119342 5045 flags.go:64] FLAG: --tls-private-key-file=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119352 5045 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119362 5045 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119372 5045 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119383 5045 flags.go:64] FLAG: --v="2"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119397 5045 flags.go:64] FLAG: --version="false"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119410 5045 flags.go:64] FLAG: --vmodule=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119422 5045 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.119434 5045 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119658 5045 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119675 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119688 5045 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119700 5045 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119742 5045 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119755 5045 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119767 5045 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119779 5045 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119788 5045 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119798 5045 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119808 5045 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119819 5045 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119835 5045 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119847 5045 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119859 5045 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119869 5045 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119880 5045 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119891 5045 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119905 5045 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119918 5045 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119929 5045 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119939 5045 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119949 5045 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119960 5045 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119971 5045 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119980 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119989 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.119997 5045 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120006 5045 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120015 5045 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120024 5045 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120035 5045 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120044 5045 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120052 5045 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120061 5045 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120070 5045 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120079 5045 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120087 5045 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120096 5045 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120107 5045 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120119 5045 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120129 5045 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120140 5045 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120150 5045 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120161 5045 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120173 5045 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120183 5045 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120195 5045 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120207 5045 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120216 5045 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120227 5045 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120239 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120251 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120263 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120274 5045 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120284 5045 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120296 5045 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120306 5045 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120317 5045 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120327 5045 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120338 5045 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120350 5045 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120361 5045 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120372 5045 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120382 5045 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120390 5045 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120402 5045 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120426 5045 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120436 5045 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120445 5045 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.120456 5045 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.120481 5045 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.132201 5045 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.132246 5045 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132343 5045 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132351 5045 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132355 5045 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132360 5045 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132365 5045 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132368 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132372 5045 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132376 5045 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132381 5045 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132385 5045 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132389 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132393 5045 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132397 5045 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132401 5045 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132406 5045 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132409 5045 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132413 5045 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132418 5045 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132422 5045 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132426 5045 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132430 5045 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132434 5045 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132438 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.132442 5045 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133251 5045 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133263 5045 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133269 5045 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133273 5045 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133278 5045 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133282 5045 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133286 5045 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133289 5045 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133293 5045 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133297 5045 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133301 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133304 5045 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133309 5045 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133316 5045 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133321 5045 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133325 5045 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133329 5045 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133332 5045 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133336 5045 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133341 5045 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133345 5045 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133349 5045 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133353 5045 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133356 5045 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133361 5045 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133365 5045 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133369 5045 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133372 5045 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133376 5045 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133379 5045 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133383 5045 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133387 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133390 5045 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133395 5045 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133399 5045 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133403 5045 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133408 5045 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133412 5045 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133416 5045 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133419 5045 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133423 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133427 5045 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133430 5045 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133434 5045 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133437 5045 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133441 5045 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133445 5045 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.133453 5045 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133590 5045 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133597 5045 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133601 5045 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133605 5045 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133609 5045 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133612 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133616 5045 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133619 5045 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133623 5045 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133626 5045 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133630 5045 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133633 5045 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133637 5045 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133640 5045 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133645 5045 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133651 5045 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133681 5045 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133686 5045 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133691 5045 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133696 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133700 5045 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133704 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133732 5045 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133742 5045 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133747 5045 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133752 5045 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133757 5045 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133762 5045 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133767 5045 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133772 5045 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133776 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133780 5045 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133785 5045 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133789 5045 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133794 5045 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133799 5045 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133803 5045 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133807 5045 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133810 5045 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133814 5045 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133818 5045 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133821 5045 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133825 5045 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133828 5045 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133832 5045 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133835 5045 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133839 5045 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133842 5045 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133846 5045 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133849 5045 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133854 5045 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133858 5045 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133861 5045 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133865 5045 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133868 5045 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133872 5045 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133875 5045 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133879 5045 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133882 5045 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133886 5045 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133889 5045 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133893 5045 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133896 5045 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133900 5045 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133903 5045 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133907 5045 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133910 5045 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133915 5045 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133920 5045 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133923 5045 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.133928 5045 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.133935 5045 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.134834 5045 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.142663 5045 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.142831 5045 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.144987 5045 server.go:997] "Starting client certificate rotation"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.145017 5045 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.145316 5045 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-29 07:00:41.592412534 +0000 UTC
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.145440 5045 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 800h1m37.446977295s for next certificate rotation
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.173100 5045 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.176885 5045 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.198902 5045 log.go:25] "Validated CRI v1 runtime API"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.244589 5045 log.go:25] "Validated CRI v1 image API"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.247694 5045 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.254487 5045 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-22-54-35-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.254535 5045 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.285798 5045 manager.go:217] Machine: {Timestamp:2025-11-25 22:59:04.281364083 +0000 UTC m=+0.639023275 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:f06a8c5f-301f-4137-af20-68ca464a7a49 BootID:3b8b2817-c700-40bf-9467-8031fbd1fc68 Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:96:32:be Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:96:32:be Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:fc:36:ee Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:84:9a:29 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:c2:1c:90 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:18:6d:c9 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:14:4f:37 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:46:08:1f:c9:08:73 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:0a:71:8d:64:b5:c5 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.286340 5045 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.286669 5045 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.288444 5045 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.288802 5045 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.288858 5045 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.289209 5045 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.289229 5045 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.289855 5045 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.289907 5045 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.290696 5045 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.291269 5045 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.297856 5045 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.297896 5045 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.297947 5045 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.297969 5045 kubelet.go:324] "Adding apiserver pod source"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.297990 5045 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.307454 5045 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.309037 5045 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.311153 5045 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.311860 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.311861 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused
Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.312024 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError"
Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.312030 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313854 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313892 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313905 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313916 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313933 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313943 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313954 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313972 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313985 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.313996 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.314011 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.314020 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.314936 5045 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.315614 5045 server.go:1280] "Started kubelet"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.316883 5045 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.316881 5045 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.317201 5045 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused
Nov 25 22:59:04 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.318490 5045 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.319312 5045 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.319367 5045 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.319654 5045 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 09:12:39.939712472 +0000 UTC
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.319774 5045 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 466h13m35.619945418s for next certificate rotation
Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.319921 5045 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.320020 5045 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.320054 5045 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.320235 5045 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.321794 5045 factory.go:55] Registering systemd factory
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.321882 5045 factory.go:221] Registration of the systemd container factory successfully
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.322698 5045 factory.go:153] Registering CRI-O factory
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.322902 5045 factory.go:221] Registration of the crio container factory successfully
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.323016 5045 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.323178 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused
Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.323321 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.323438 5045 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.323542 5045 factory.go:103] Registering Raw factory
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.323582 5045 manager.go:1196] Started watching for new ooms in manager
Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.324114 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="200ms"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.326568 5045 manager.go:319] Starting recovery of all containers
Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.325847 5045 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.89:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b621a57187969 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 22:59:04.315574633 +0000 UTC m=+0.673233755,LastTimestamp:2025-11-25 22:59:04.315574633 +0000 UTC m=+0.673233755,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.349684 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350223 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350266 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350287 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350303 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350364 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350381 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350397 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350440 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350478 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350498 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350536 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350550 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350569 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350603 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350622 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350640 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350663 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350686 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350702 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350772 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350787 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350823 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350842 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350861 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350904 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350949 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.350968 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351010 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351026 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351067 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351087 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351121 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351136 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351151 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351166 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351183 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351200 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351242 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351256 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351273 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351310 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351336 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351359 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351394 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351414 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351431 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351469 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351519 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351569 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351585 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351602 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351650 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351667 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351685 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351703 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351734 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351749 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351786 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351826 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351842 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351859 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351899 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351916 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351933 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351976 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.351993 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.352027 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.352042 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.352056 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.352097 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.352114 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.354318 5045 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.354358 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.354376 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.355807 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.355872 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.355918 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.355937 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.355960 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.355977 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356015 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356031 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356068 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356086 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356103 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356143 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356162 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356178 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356219 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356238 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356276 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356292 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356328 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356346 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356387 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356402 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356417 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356439 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356457 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356475 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356492 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356511 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356561 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356599 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356654 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356695 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356726 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356744 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356785 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356802 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356842 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356883 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356900 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356918 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356954 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356970 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.356986 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357003 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357021 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357037 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357054 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357090 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357106 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357122 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357163 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357177 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357195 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357231 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357271 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357286 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357302 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357338 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357355 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357371 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357407 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357425 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357440 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357476 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357491 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod=""
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357537 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357566 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357580 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357595 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357610 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357627 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357644 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357658 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357674 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.357689 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.358648 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.358776 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360198 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360262 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360287 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360313 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360334 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360355 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360378 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360404 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360429 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360450 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360474 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360496 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360517 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360540 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360562 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360587 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360608 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360631 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360652 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360676 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360702 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360750 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360773 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360798 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360820 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360843 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360866 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360887 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360908 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360933 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360955 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.360977 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" 
volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361000 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361025 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361046 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361069 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361091 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361112 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361133 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361156 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361178 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361199 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361220 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361243 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361265 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361287 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361308 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361331 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361355 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361376 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361399 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361421 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361683 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361706 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361760 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361783 5045 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361803 5045 reconstruct.go:97] "Volume reconstruction finished" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.361819 5045 reconciler.go:26] "Reconciler: start to sync state" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.373619 5045 manager.go:324] Recovery completed Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.388419 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.392536 5045 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.394224 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.394282 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.394301 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.395178 5045 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.395285 5045 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.395343 5045 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.395422 5045 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.396267 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.396337 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.396897 5045 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.396918 5045 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.396947 5045 state_mem.go:36] "Initialized new in-memory state store" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.412572 5045 policy_none.go:49] "None policy: Start" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.413520 5045 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.413592 5045 state_mem.go:35] "Initializing new in-memory state store" Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.420069 5045 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.474342 5045 manager.go:334] "Starting Device Plugin manager" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.474427 5045 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.474450 5045 server.go:79] "Starting device plugin registration server" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.475205 5045 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.475253 5045 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.475548 5045 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.475808 5045 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.475852 5045 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.488669 5045 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 22:59:04 crc kubenswrapper[5045]: 
I1125 22:59:04.495891 5045 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.496067 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.499421 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.499489 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.499515 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.499787 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.500059 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.500120 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501245 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501304 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501325 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501328 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501382 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501401 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501789 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501851 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.501962 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.503248 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.503379 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.503278 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.503489 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.503460 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.503512 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.503805 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.504151 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.504216 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505350 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505390 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505401 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505420 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505428 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505438 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505698 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505835 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.505898 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.506654 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.506785 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.506854 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.506981 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.506997 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.507168 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.507503 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.507615 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.510320 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.510380 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.510399 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.525772 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="400ms" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564053 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564120 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564169 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564207 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564241 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564413 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564477 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564518 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564552 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564584 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564620 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.564872 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 22:59:04 crc 
kubenswrapper[5045]: I1125 22:59:04.564955 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.565026 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.565103 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.576172 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.578348 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.578409 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.578431 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.578474 5045 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.579515 5045 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.89:6443: connect: connection refused" node="crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666391 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666473 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666513 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666549 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666584 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666620 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666656 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666691 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666754 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666792 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666783 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666789 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666857 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666808 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666953 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666905 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666906 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666827 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667059 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666836 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667097 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667145 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.666955 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667194 5045 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667104 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667265 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667234 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667418 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667523 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.667413 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.780217 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.786452 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.786593 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.786625 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.786687 5045 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.787773 5045 kubelet_node_status.go:99] "Unable to register node with API server" err="Post 
\"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.89:6443: connect: connection refused" node="crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.829839 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.837743 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.862056 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.881105 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.882137 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-47c86fff62673e8b0b1b3d5347928d231aa9a8a6324d36d86cb7c53026fd2b5e WatchSource:0}: Error finding container 47c86fff62673e8b0b1b3d5347928d231aa9a8a6324d36d86cb7c53026fd2b5e: Status 404 returned error can't find the container with id 47c86fff62673e8b0b1b3d5347928d231aa9a8a6324d36d86cb7c53026fd2b5e Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.884201 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-d5e94bb174a2b1b6e0a89c0a20f2c3c7e7e0f4e04a859dafa8c3eb51c4d9ee1c WatchSource:0}: Error finding container d5e94bb174a2b1b6e0a89c0a20f2c3c7e7e0f4e04a859dafa8c3eb51c4d9ee1c: Status 404 returned error can't find the container with id d5e94bb174a2b1b6e0a89c0a20f2c3c7e7e0f4e04a859dafa8c3eb51c4d9ee1c Nov 25 22:59:04 crc kubenswrapper[5045]: I1125 22:59:04.887855 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.891785 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-ff2d7dcf66ca57ad2c3217a81f3e71a56a60fca02a57d3de62415163e0f8fd91 WatchSource:0}: Error finding container ff2d7dcf66ca57ad2c3217a81f3e71a56a60fca02a57d3de62415163e0f8fd91: Status 404 returned error can't find the container with id ff2d7dcf66ca57ad2c3217a81f3e71a56a60fca02a57d3de62415163e0f8fd91 Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.901374 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-09729c599b890cf5ee71536abc9ef65df702091a4317a6aa1ba5817261e4394c WatchSource:0}: Error finding container 09729c599b890cf5ee71536abc9ef65df702091a4317a6aa1ba5817261e4394c: Status 404 returned error can't find the container with id 09729c599b890cf5ee71536abc9ef65df702091a4317a6aa1ba5817261e4394c Nov 25 22:59:04 crc kubenswrapper[5045]: W1125 22:59:04.905081 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-c762bcb373816ec03da4b992d96039a1f5ff26bb72d2d55d86eb48f2fd820bc1 WatchSource:0}: Error finding container c762bcb373816ec03da4b992d96039a1f5ff26bb72d2d55d86eb48f2fd820bc1: Status 404 returned error can't find the container with id c762bcb373816ec03da4b992d96039a1f5ff26bb72d2d55d86eb48f2fd820bc1 Nov 25 22:59:04 crc kubenswrapper[5045]: E1125 22:59:04.927101 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="800ms" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.188516 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.191270 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.191799 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.191821 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.191865 5045 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 22:59:05 crc kubenswrapper[5045]: E1125 22:59:05.192345 5045 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.89:6443: connect: connection refused" node="crc" Nov 25 22:59:05 crc kubenswrapper[5045]: W1125 22:59:05.199090 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:05 crc kubenswrapper[5045]: E1125 22:59:05.199219 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed 
to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError" Nov 25 22:59:05 crc kubenswrapper[5045]: W1125 22:59:05.305173 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:05 crc kubenswrapper[5045]: E1125 22:59:05.305331 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.318191 5045 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.401812 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"47c86fff62673e8b0b1b3d5347928d231aa9a8a6324d36d86cb7c53026fd2b5e"} Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.403156 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d5e94bb174a2b1b6e0a89c0a20f2c3c7e7e0f4e04a859dafa8c3eb51c4d9ee1c"} Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.405115 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c762bcb373816ec03da4b992d96039a1f5ff26bb72d2d55d86eb48f2fd820bc1"} Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.406903 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"09729c599b890cf5ee71536abc9ef65df702091a4317a6aa1ba5817261e4394c"} Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.408755 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ff2d7dcf66ca57ad2c3217a81f3e71a56a60fca02a57d3de62415163e0f8fd91"} Nov 25 22:59:05 crc kubenswrapper[5045]: W1125 22:59:05.571082 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:05 crc kubenswrapper[5045]: E1125 22:59:05.571260 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 
38.102.83.89:6443: connect: connection refused" logger="UnhandledError" Nov 25 22:59:05 crc kubenswrapper[5045]: E1125 22:59:05.728658 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="1.6s" Nov 25 22:59:05 crc kubenswrapper[5045]: W1125 22:59:05.817608 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:05 crc kubenswrapper[5045]: E1125 22:59:05.817744 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.992976 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.995147 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.995223 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.995246 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:05 crc kubenswrapper[5045]: I1125 22:59:05.995294 5045 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 22:59:05 crc kubenswrapper[5045]: E1125 22:59:05.996126 5045 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.89:6443: connect: connection refused" node="crc" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.319101 5045 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.416027 5045 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b" exitCode=0 Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.416133 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b"} Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.416181 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.417807 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.417867 5045 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.417884 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.422645 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a"} Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.422704 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1"} Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.422739 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca"} Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.422754 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db"} Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.423204 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.424774 5045 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2daef" exitCode=0 Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.424852 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2daef"} Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.425076 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.425702 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.425762 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.425780 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.427375 5045 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf" exitCode=0 Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.427511 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf"} Nov 25 
22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.427809 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.428009 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.428054 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.428077 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.429863 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.429909 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.429927 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.431773 5045 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="e55b36f09d94f86ed9af4b692033ea99f3568a302c25de06000fc30e78bf0210" exitCode=0 Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.431832 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"e55b36f09d94f86ed9af4b692033ea99f3568a302c25de06000fc30e78bf0210"} Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.431926 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.433056 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.433100 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.433118 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.433908 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.435815 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.435843 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:06 crc kubenswrapper[5045]: I1125 22:59:06.435863 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.318161 5045 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:07 crc kubenswrapper[5045]: E1125 22:59:07.329284 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="3.2s" Nov 25 22:59:07 crc kubenswrapper[5045]: W1125 22:59:07.340297 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:07 crc kubenswrapper[5045]: E1125 22:59:07.340413 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError" Nov 25 22:59:07 crc kubenswrapper[5045]: W1125 22:59:07.384295 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.89:6443: connect: connection refused Nov 25 22:59:07 crc kubenswrapper[5045]: E1125 22:59:07.384393 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.89:6443: connect: connection refused" logger="UnhandledError" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.437835 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"237e4ca3eedcb7251aa8ae826c8d588c0dadd94658c90aa10b9c333480f90707"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.437968 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.439824 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.439872 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.439889 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.443263 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.443304 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.443317 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.443333 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.446647 5045 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6b5b8466aff239f0cd94eb2954eda4f538c37d73166e8e729dcdafeba570c586" exitCode=0 Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.446700 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6b5b8466aff239f0cd94eb2954eda4f538c37d73166e8e729dcdafeba570c586"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.446847 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.448091 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.448135 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.448149 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.454528 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.455040 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.455390 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.455448 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.455463 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97"} Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.456234 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.456258 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.456272 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:07 crc kubenswrapper[5045]: 
I1125 22:59:07.456755 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.456971 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.456980 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.596602 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.597976 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.598044 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.598062 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:07 crc kubenswrapper[5045]: I1125 22:59:07.598104 5045 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 22:59:07 crc kubenswrapper[5045]: E1125 22:59:07.598872 5045 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.89:6443: connect: connection refused" node="crc" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.464056 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1"} Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.464166 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.465975 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.466034 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.466059 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.468313 5045 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="16550f7d36bc83ef28cdc7cf4d4bba1623eb85447555ed4b1ca0b351b3abf599" exitCode=0 Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.468422 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"16550f7d36bc83ef28cdc7cf4d4bba1623eb85447555ed4b1ca0b351b3abf599"} Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.468595 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.468611 5045 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.468683 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.468611 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470659 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470678 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470757 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470815 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470790 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470856 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470868 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470879 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.470948 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:08 crc kubenswrapper[5045]: I1125 22:59:08.485099 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.475950 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9a1f4562d8549431b67c248d6065ce39034a6700ef79a0fd64451c11d2deee54"} Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.476454 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1d1e9f9be6409a47e8aa79f147c95623953e5ded027fefdadd25dd37a87257c7"} Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.476485 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1e13e6131486fefdcb833873089b1946fe54c95d70ddf7dedb2bca2cd172e735"} Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.476090 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.476032 5045 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.476571 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.478835 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.478890 5045 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.478911 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.478938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.478988 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:09 crc kubenswrapper[5045]: I1125 22:59:09.479005 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.485174 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ea39e26c7fcabc0edb2beae99d4102cdb2fb80ef88280e9db61061ff4767d764"} Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.485240 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"806a8e4ca4724582a8ec9ade25919474895412376fece624e15dfee6d7af1aac"} Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.485336 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.486615 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.486665 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.486753 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.799040 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.801051 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.801131 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.801152 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:10 crc kubenswrapper[5045]: I1125 22:59:10.801201 5045 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.286182 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.488919 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.490983 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.491079 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" 
Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.491110 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.697250 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.697489 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.699277 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.699354 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:11 crc kubenswrapper[5045]: I1125 22:59:11.699379 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.091969 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.492166 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.492263 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.493928 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.493995 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.494018 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.494083 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.494126 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.494144 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:12 crc kubenswrapper[5045]: I1125 22:59:12.873311 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.050845 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.051172 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.053056 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.053112 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.053132 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.498352 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.500418 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.500950 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.502199 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.836669 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.836964 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.838701 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.838784 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.838798 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:13 crc kubenswrapper[5045]: I1125 22:59:13.845342 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 22:59:14 crc kubenswrapper[5045]: I1125 22:59:14.413021 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 22:59:14 crc kubenswrapper[5045]: E1125 22:59:14.488973 5045 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 22:59:14 crc kubenswrapper[5045]: I1125 22:59:14.500771 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:14 crc kubenswrapper[5045]: I1125 22:59:14.502482 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:14 crc kubenswrapper[5045]: I1125 22:59:14.502532 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:14 crc kubenswrapper[5045]: I1125 22:59:14.502546 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:14 crc kubenswrapper[5045]: I1125 22:59:14.631867 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 22:59:15 crc kubenswrapper[5045]: I1125 22:59:15.504223 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:15 crc kubenswrapper[5045]: I1125 22:59:15.506212 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:15 crc kubenswrapper[5045]: I1125 22:59:15.506285 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:15 crc kubenswrapper[5045]: I1125 22:59:15.506312 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:15 crc kubenswrapper[5045]: I1125 22:59:15.511250 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 22:59:16 crc kubenswrapper[5045]: I1125 22:59:16.508071 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:16 crc kubenswrapper[5045]: I1125 22:59:16.509906 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:16 crc kubenswrapper[5045]: I1125 22:59:16.509966 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:16 crc kubenswrapper[5045]: I1125 22:59:16.509987 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:17 crc kubenswrapper[5045]: I1125 22:59:17.509964 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:17 crc kubenswrapper[5045]: I1125 22:59:17.511152 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:17 crc kubenswrapper[5045]: I1125 22:59:17.511231 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:17 crc kubenswrapper[5045]: I1125 22:59:17.511252 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:17 crc kubenswrapper[5045]: I1125 22:59:17.632860 5045 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 22:59:17 crc kubenswrapper[5045]: I1125 22:59:17.633001 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 22:59:18 crc kubenswrapper[5045]: W1125 22:59:18.203927 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.204082 5045 trace.go:236] Trace[1245533522]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 22:59:08.202) (total time: 10001ms):
Nov 25 22:59:18 crc kubenswrapper[5045]: Trace[1245533522]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (22:59:18.203)
Nov 25 22:59:18 crc kubenswrapper[5045]: Trace[1245533522]: [10.001708561s] [10.001708561s] END
Nov 25 22:59:18 crc kubenswrapper[5045]: E1125 22:59:18.204121 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.318705 5045 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.514887 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.517568 5045 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1" exitCode=255
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.517638 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1"}
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.517882 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.519238 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.519307 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.519328 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.520283 5045 scope.go:117] "RemoveContainer" containerID="384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.699565 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.699938 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.701407 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.701457 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.701467 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.756995 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 25 22:59:18 crc kubenswrapper[5045]: W1125 22:59:18.765753 5045 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 25 22:59:18 crc kubenswrapper[5045]: I1125 22:59:18.765917 5045 trace.go:236] Trace[535659369]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 22:59:08.763) (total time: 10002ms):
Nov 25 22:59:18 crc kubenswrapper[5045]: Trace[535659369]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (22:59:18.765)
Nov 25 22:59:18 crc kubenswrapper[5045]: Trace[535659369]: [10.002542775s] [10.002542775s] END
Nov 25 22:59:18 crc kubenswrapper[5045]: E1125 22:59:18.765964 5045 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.091854 5045 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.091988 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.097699 5045 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.097787 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.526629 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.529078 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478"}
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.529209 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.529380 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.530118 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.530155 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.530170 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.530450 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.530479 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.530488 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:19 crc kubenswrapper[5045]: I1125 22:59:19.544856 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 25 22:59:20 crc kubenswrapper[5045]: I1125 22:59:20.546900 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 22:59:20 crc kubenswrapper[5045]: I1125 22:59:20.548452 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:20 crc kubenswrapper[5045]: I1125 22:59:20.548509 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:20 crc kubenswrapper[5045]: I1125 22:59:20.548530 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:21 crc kubenswrapper[5045]: I1125 22:59:21.456011 5045 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 25 22:59:21 crc kubenswrapper[5045]: I1125 22:59:21.697787 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.310107 5045 apiserver.go:52] "Watching apiserver"
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.316880 5045 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.318681 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.321154 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.321351 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.321414 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.321505 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.321524 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.321600 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 22:59:22 crc kubenswrapper[5045]: E1125 22:59:22.321891 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 22:59:22 crc kubenswrapper[5045]: E1125 22:59:22.322108 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 22:59:22 crc kubenswrapper[5045]: E1125 22:59:22.322234 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.322610 5045 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.325453 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.328516 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.328530 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.329240 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.329279 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.329519 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.329240 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.329892 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.330140 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.371664 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.396214 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.416018 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.436052 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.450232 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.466895 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.486646 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.504860 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.521540 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.885987 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.899582 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.914766 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.941387 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.956032 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.967958 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.980889 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:22 crc kubenswrapper[5045]: I1125 22:59:22.991202 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:23 crc kubenswrapper[5045]: I1125 22:59:23.563995 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:23 crc kubenswrapper[5045]: I1125 22:59:23.582362 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:23 crc kubenswrapper[5045]: I1125 22:59:23.596880 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:23 crc kubenswrapper[5045]: I1125 22:59:23.609114 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:23 crc kubenswrapper[5045]: I1125 22:59:23.625061 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:23 crc kubenswrapper[5045]: I1125 22:59:23.642229 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:23 crc kubenswrapper[5045]: I1125 22:59:23.657328 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:23 crc kubenswrapper[5045]: I1125 22:59:23.672509 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.092520 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.094168 5045 trace.go:236] Trace[1095504581]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 22:59:13.625) (total time: 10468ms): Nov 25 22:59:24 crc kubenswrapper[5045]: Trace[1095504581]: ---"Objects listed" error: 10468ms (22:59:24.094) Nov 25 22:59:24 crc kubenswrapper[5045]: Trace[1095504581]: [10.468166163s] [10.468166163s] END Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.094206 5045 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.097736 5045 trace.go:236] Trace[1187910603]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 22:59:13.779) (total time: 10317ms): Nov 25 22:59:24 crc kubenswrapper[5045]: Trace[1187910603]: ---"Objects listed" error: 10317ms (22:59:24.097) Nov 25 22:59:24 crc kubenswrapper[5045]: Trace[1187910603]: [10.317697401s] [10.317697401s] END Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.098200 5045 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.098029 5045 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.100483 5045 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.201370 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.201856 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202063 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202214 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202247 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202382 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202441 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202496 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202705 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202874 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.202958 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.203248 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.203386 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.203478 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.203131 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.203915 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.204049 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.205044 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.205386 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.205510 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.205615 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.206787 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.206881 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.208018 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.208107 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.208157 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.208629 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.208828 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.209016 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.209270 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.209306 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.209341 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.209682 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.209844 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.210176 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.211234 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.211326 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.212599 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.212973 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.213180 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.213372 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.214060 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.214347 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.215103 5045 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.215268 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222174 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222458 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222489 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222517 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222545 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222570 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222599 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 
22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222629 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222668 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222700 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222779 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222819 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222850 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222887 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222925 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222960 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.222998 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223039 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223079 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223111 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223147 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223182 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223219 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223252 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223286 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223323 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223357 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223393 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223429 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223466 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223499 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223535 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223570 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223605 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223647 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223684 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223741 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223779 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223818 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223852 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223877 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223911 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223936 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223961 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.223983 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224008 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224046 5045 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224078 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224102 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224131 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224160 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224183 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224231 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224259 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224287 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224320 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224371 5045 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224410 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224441 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224476 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224508 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224539 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224573 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224609 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224648 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224682 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224741 5045 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224779 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224824 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224860 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224901 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224934 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.224975 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225009 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225039 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225076 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225114 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225154 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225186 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225223 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225261 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225294 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225329 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225367 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225403 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225441 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225481 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225528 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225560 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225599 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225638 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225678 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225744 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225787 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225827 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225862 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225898 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225930 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225957 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.225987 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.226016 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.226048 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.226395 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.226454 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.227330 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.228011 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.228427 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.229117 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.229501 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.229804 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230134 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230190 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230220 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230251 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230280 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230314 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230340 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230374 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230403 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230429 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod 
\"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230457 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230485 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230511 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230539 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230571 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230601 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230626 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230655 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230804 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230837 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230866 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230894 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230919 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230949 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.230979 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231005 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231032 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231061 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231087 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231115 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231146 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231144 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231177 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231203 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231231 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231260 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231284 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231314 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231345 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" 
(UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231373 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231420 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231445 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231474 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231928 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232435 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.231502 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232737 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232765 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232796 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232807 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232825 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232852 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232882 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232909 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232937 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232945 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.232960 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233121 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233186 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233240 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233297 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233356 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233405 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233463 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233516 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233564 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod 
\"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233128 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233223 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233275 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233444 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233453 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.233597 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 22:59:24.733562408 +0000 UTC m=+21.091221530 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240373 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240412 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240442 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240468 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240509 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240534 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240560 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240585 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240647 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: 
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240677 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240733 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240734 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240771 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240763 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240863 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240898 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.240932 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241095 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241194 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241243 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241285 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241322 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241364 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241401 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241440 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.241488 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233602 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233811 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.233994 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.234022 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.234384 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.234386 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.234359 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.234434 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.234693 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.234774 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.234792 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.239191 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.239236 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.239363 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.239612 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.239645 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.239874 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.239919 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.239956 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.242322 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.242585 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.243007 5045 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.242185 5045 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.242809 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.244015 5045 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.244893 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.244903 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.243825 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.243661 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.244209 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.244343 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.244967 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.243476 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.245250 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.245294 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:24.745257428 +0000 UTC m=+21.102916550 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.245481 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.245493 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.245880 5045 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246024 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246058 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246279 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246356 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.245916 5045 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246556 5045 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246582 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246670 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246849 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.246950 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.247619 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.248296 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.248354 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249393 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249418 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249443 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249776 5045 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249825 5045 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249893 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249935 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249968 5045 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.249999 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.250030 5045 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.250058 5045 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.250087 5045 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.250119 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.250154 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.250267 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251007 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251423 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251476 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.250189 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251593 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251614 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251629 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251666 5045 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251692 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251731 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 
22:59:24.251746 5045 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251759 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251772 5045 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.251921 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.252093 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.252688 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.252988 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.253244 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.253983 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.254859 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.254891 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255491 5045 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255506 5045 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255517 5045 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255529 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255582 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255595 5045 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255604 5045 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255615 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255626 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255636 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255646 5045 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.255192 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.256020 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.256346 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.256746 5045 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.259274 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.259854 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.259957 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.260505 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.260856 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.262273 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.265463 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:24.765383216 +0000 UTC m=+21.123042418 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.265597 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.268964 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.272901 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.272947 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.274463 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.274509 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.274529 5045 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.274604 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:24.774580795 +0000 UTC m=+21.132240087 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.274792 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.275464 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.275693 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.275841 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.275988 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.278355 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.278360 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.278778 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.278815 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.278977 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279035 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279055 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279166 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279318 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279309 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279694 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279764 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279794 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279883 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.279892 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.280108 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.280237 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.283510 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.283535 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.283547 5045 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.283574 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.283599 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:24.783579489 +0000 UTC m=+21.141238601 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.284433 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.285397 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.285491 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.285532 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.285756 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.285993 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.285997 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.286211 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.286396 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.286419 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.286880 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.287151 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.287431 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288097 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288130 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288150 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288479 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288560 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288692 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288738 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288909 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.288844 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.289036 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.289233 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.289354 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.289442 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.289518 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.289659 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.289887 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.289957 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.290051 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.290100 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.290129 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.290248 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.290303 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.290557 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.290797 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.291102 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.291728 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.293417 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.293471 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.293881 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.294009 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.296457 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.296967 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.297984 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.303179 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.306532 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.307780 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.307917 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.308020 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.308094 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.308145 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.308205 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.308374 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.308874 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.310207 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.311312 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.320638 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.321984 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.323460 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.324215 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.324849 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.325345 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.325529 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.332462 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.336229 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.357846 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.357913 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.358056 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.358074 5045 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.358090 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.358103 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.358115 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.358127 5045 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.358283 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360605 5045 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360646 5045 reconciler_common.go:293] 
"Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360658 5045 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360669 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360681 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360693 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360721 5045 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360738 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360751 5045 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360763 5045 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.359281 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360774 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360933 5045 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360954 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.360993 5045 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361012 5045 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361035 5045 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361052 5045 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361066 5045 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361078 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361092 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361110 5045 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361123 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361136 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361151 5045 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361164 5045 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361176 5045 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" 
Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361192 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361204 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361218 5045 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361229 5045 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361243 5045 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361255 5045 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361268 5045 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361280 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361292 5045 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361304 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361316 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361330 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361343 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc 
kubenswrapper[5045]: I1125 22:59:24.361356 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361367 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361381 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361398 5045 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361414 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361428 5045 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361443 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361459 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361472 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361484 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361497 5045 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361510 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361523 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: 
\"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361535 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361546 5045 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361562 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361574 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361586 5045 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361599 5045 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361611 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361622 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361635 5045 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361648 5045 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361660 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361674 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361687 5045 reconciler_common.go:293] "Volume detached for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361699 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361731 5045 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361747 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361759 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361772 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361784 5045 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361795 5045 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361808 5045 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361819 5045 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361831 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361843 5045 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361855 5045 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361865 5045 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361878 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361891 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361904 5045 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361916 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361931 5045 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361943 5045 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361956 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361968 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361980 5045 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.361992 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362004 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362015 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 
22:59:24.362027 5045 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362017 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362039 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362054 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362068 5045 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362083 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362096 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362108 5045 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362122 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362137 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362152 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362166 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362183 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362197 5045 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362210 5045 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362223 5045 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362236 5045 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362249 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362262 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362274 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362288 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362301 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362313 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362326 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362338 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362349 5045 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" 
DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362362 5045 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362375 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362387 5045 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362401 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362413 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362425 5045 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362438 5045 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362451 5045 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362465 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362479 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362492 5045 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362504 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362515 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362528 5045 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362541 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362554 5045 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362566 5045 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362580 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362593 5045 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362606 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362619 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362632 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362645 5045 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362656 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362669 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.362682 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.359333 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.373615 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.383677 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.389739 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.400032 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.400160 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.400308 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.400354 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.400400 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.400438 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.404061 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.404771 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.406757 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.407423 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.413750 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.414295 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.415091 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.419189 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.419948 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.421157 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.421796 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.426301 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.427242 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.427902 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.429684 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.430237 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.431215 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.431588 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.432206 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.436563 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.437192 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.438469 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.439093 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.440398 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.440910 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.441621 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" 
path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.442998 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.443591 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.445313 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.445924 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.446426 5045 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.447089 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.447621 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.448964 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.449453 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.450360 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.452139 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.452871 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.453958 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.454661 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" 
path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.456484 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.457107 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.458107 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.458705 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.459655 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.460193 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.461104 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.462051 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.462792 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.464652 5045 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.464668 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.464679 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.464688 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.464765 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.465118 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.465981 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.465995 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.466562 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.467963 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.468701 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.469983 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.475686 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.481821 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.506312 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.518818 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.535430 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.577122 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a8679c74d5fe7c14f9217fdf8ba2cf5a3166defe5603d4c191a25998237b3b15"} Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.587287 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.587415 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"1d705a691520e8d036d2594d6dbe8531a63adf8beb0fd1745277a7dae8676d3a"} Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.589740 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"75d25b3271b5350fdd1b3c2937d5e56510d0ce6a0c3c3b5e563840eb4a05eacb"} Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.609150 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.768537 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.768675 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 22:59:25.768649691 +0000 UTC m=+22.126308803 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.769140 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.769336 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.769599 5045 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.769775 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-25 22:59:25.769754352 +0000 UTC m=+22.127413654 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.769864 5045 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.770031 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:25.770019089 +0000 UTC m=+22.127678211 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.870266 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:24 crc kubenswrapper[5045]: I1125 22:59:24.870683 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.870490 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.871027 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.871116 5045 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.871249 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:25.871230954 +0000 UTC m=+22.228890066 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.870926 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.871413 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.871489 5045 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:24 crc kubenswrapper[5045]: E1125 22:59:24.871606 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:25.871595295 +0000 UTC m=+22.229254407 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.224534 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.233785 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.250161 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.266515 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.272512 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-ht6dm"] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.272841 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.275077 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-lbrq8"] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.275462 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-lbrq8" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.279149 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.279207 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.279240 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.281365 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.283303 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.285769 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-pqpcg"] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.286410 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.287016 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.288466 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.288497 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.290336 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.290993 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.291015 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.293234 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-7dpm4"] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.293650 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.297504 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.297547 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.297659 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.299048 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.299281 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.313119 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.336371 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.361034 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375545 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bc394db7-8b38-4abe-841d-83a3ea3d07b0-proxy-tls\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375586 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-os-release\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " 
pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375608 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/bc394db7-8b38-4abe-841d-83a3ea3d07b0-rootfs\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375631 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntr5v\" (UniqueName: \"kubernetes.io/projected/bc394db7-8b38-4abe-841d-83a3ea3d07b0-kube-api-access-ntr5v\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375648 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-cnibin\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375694 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-cni-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375738 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-netns\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375761 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-cni-bin\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375786 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-cni-multus\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375869 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/07acb2e0-1638-4174-8f01-b08385fca2dc-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.375925 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj4hx\" (UniqueName: 
\"kubernetes.io/projected/a9143958-692e-41d3-970d-ffdd160f8524-kube-api-access-zj4hx\") pod \"node-resolver-lbrq8\" (UID: \"a9143958-692e-41d3-970d-ffdd160f8524\") " pod="openshift-dns/node-resolver-lbrq8" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376056 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-os-release\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376104 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/e971a47d-97d5-4a21-a255-2497b2b3cbbc-cni-binary-copy\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376128 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-hostroot\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376151 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-tuning-conf-dir\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376174 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dwhw\" (UniqueName: \"kubernetes.io/projected/07acb2e0-1638-4174-8f01-b08385fca2dc-kube-api-access-7dwhw\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376202 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-system-cni-dir\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376256 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-cnibin\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376273 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-kubelet\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376287 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/07acb2e0-1638-4174-8f01-b08385fca2dc-cni-binary-copy\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376302 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a9143958-692e-41d3-970d-ffdd160f8524-hosts-file\") pod \"node-resolver-lbrq8\" (UID: \"a9143958-692e-41d3-970d-ffdd160f8524\") " pod="openshift-dns/node-resolver-lbrq8" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376325 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-socket-dir-parent\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376340 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-daemon-config\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376372 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-conf-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376395 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-etc-kubernetes\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376437 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jjrv\" (UniqueName: \"kubernetes.io/projected/e971a47d-97d5-4a21-a255-2497b2b3cbbc-kube-api-access-4jjrv\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376472 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-system-cni-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376495 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-k8s-cni-cncf-io\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376532 5045 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bc394db7-8b38-4abe-841d-83a3ea3d07b0-mcd-auth-proxy-config\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.376610 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-multus-certs\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.396114 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.408751 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.421521 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\
"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.434237 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.445956 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.457143 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.467656 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477192 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-conf-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477249 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-etc-kubernetes\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477275 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jjrv\" (UniqueName: \"kubernetes.io/projected/e971a47d-97d5-4a21-a255-2497b2b3cbbc-kube-api-access-4jjrv\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477301 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-system-cni-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477325 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-k8s-cni-cncf-io\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477325 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-conf-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477356 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bc394db7-8b38-4abe-841d-83a3ea3d07b0-mcd-auth-proxy-config\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477396 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-etc-kubernetes\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477414 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-k8s-cni-cncf-io\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477414 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-system-cni-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477412 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-multus-certs\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477481 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-multus-certs\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477508 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bc394db7-8b38-4abe-841d-83a3ea3d07b0-proxy-tls\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477890 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-os-release\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477533 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-os-release\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.477993 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/bc394db7-8b38-4abe-841d-83a3ea3d07b0-rootfs\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478038 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntr5v\" (UniqueName: 
\"kubernetes.io/projected/bc394db7-8b38-4abe-841d-83a3ea3d07b0-kube-api-access-ntr5v\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478073 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-cnibin\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478104 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-cni-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478129 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-netns\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478177 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-cni-bin\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478211 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-cni-multus\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478242 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/07acb2e0-1638-4174-8f01-b08385fca2dc-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478576 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/bc394db7-8b38-4abe-841d-83a3ea3d07b0-rootfs\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478756 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-run-netns\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478813 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-cni-bin\") pod 
\"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478855 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-cnibin\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478867 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-cni-multus\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.478907 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bc394db7-8b38-4abe-841d-83a3ea3d07b0-mcd-auth-proxy-config\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479013 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-cni-dir\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479580 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/07acb2e0-1638-4174-8f01-b08385fca2dc-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479681 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zj4hx\" (UniqueName: \"kubernetes.io/projected/a9143958-692e-41d3-970d-ffdd160f8524-kube-api-access-zj4hx\") pod \"node-resolver-lbrq8\" (UID: \"a9143958-692e-41d3-970d-ffdd160f8524\") " pod="openshift-dns/node-resolver-lbrq8" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479757 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-os-release\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479799 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/e971a47d-97d5-4a21-a255-2497b2b3cbbc-cni-binary-copy\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479830 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-hostroot\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: 
I1125 22:59:25.479875 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-tuning-conf-dir\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479904 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dwhw\" (UniqueName: \"kubernetes.io/projected/07acb2e0-1638-4174-8f01-b08385fca2dc-kube-api-access-7dwhw\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479941 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-system-cni-dir\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.479978 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-cnibin\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.480012 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-kubelet\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.480084 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/07acb2e0-1638-4174-8f01-b08385fca2dc-cni-binary-copy\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.480122 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a9143958-692e-41d3-970d-ffdd160f8524-hosts-file\") pod \"node-resolver-lbrq8\" (UID: \"a9143958-692e-41d3-970d-ffdd160f8524\") " pod="openshift-dns/node-resolver-lbrq8" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.480176 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-socket-dir-parent\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.480213 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-daemon-config\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.480947 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-daemon-config\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.481161 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-os-release\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.481244 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-system-cni-dir\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.481772 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-cnibin\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.481854 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-host-var-lib-kubelet\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.481875 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/e971a47d-97d5-4a21-a255-2497b2b3cbbc-cni-binary-copy\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.481956 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-hostroot\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.482473 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/07acb2e0-1638-4174-8f01-b08385fca2dc-cni-binary-copy\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.482478 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a9143958-692e-41d3-970d-ffdd160f8524-hosts-file\") pod \"node-resolver-lbrq8\" (UID: \"a9143958-692e-41d3-970d-ffdd160f8524\") " pod="openshift-dns/node-resolver-lbrq8" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.482537 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/e971a47d-97d5-4a21-a255-2497b2b3cbbc-multus-socket-dir-parent\") pod \"multus-ht6dm\" (UID: 
\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.485384 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.488677 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/07acb2e0-1638-4174-8f01-b08385fca2dc-tuning-conf-dir\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.493033 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bc394db7-8b38-4abe-841d-83a3ea3d07b0-proxy-tls\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.500971 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7dwhw\" (UniqueName: \"kubernetes.io/projected/07acb2e0-1638-4174-8f01-b08385fca2dc-kube-api-access-7dwhw\") pod \"multus-additional-cni-plugins-pqpcg\" (UID: \"07acb2e0-1638-4174-8f01-b08385fca2dc\") " pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.501208 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zj4hx\" (UniqueName: \"kubernetes.io/projected/a9143958-692e-41d3-970d-ffdd160f8524-kube-api-access-zj4hx\") pod \"node-resolver-lbrq8\" (UID: \"a9143958-692e-41d3-970d-ffdd160f8524\") " pod="openshift-dns/node-resolver-lbrq8" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.506637 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jjrv\" (UniqueName: \"kubernetes.io/projected/e971a47d-97d5-4a21-a255-2497b2b3cbbc-kube-api-access-4jjrv\") pod \"multus-ht6dm\" (UID: \"e971a47d-97d5-4a21-a255-2497b2b3cbbc\") " pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.508377 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.511412 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntr5v\" (UniqueName: \"kubernetes.io/projected/bc394db7-8b38-4abe-841d-83a3ea3d07b0-kube-api-access-ntr5v\") pod \"machine-config-daemon-7dpm4\" (UID: \"bc394db7-8b38-4abe-841d-83a3ea3d07b0\") " pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.521139 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.532129 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.544181 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.555189 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.567264 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.587976 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-ht6dm" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.593809 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447"} Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.596024 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-lbrq8" Nov 25 22:59:25 crc kubenswrapper[5045]: W1125 22:59:25.600730 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode971a47d_97d5_4a21_a255_2497b2b3cbbc.slice/crio-071800add687bee8c66db79ee37001303df901cca8f9b306156128d4e1644d9b WatchSource:0}: Error finding container 071800add687bee8c66db79ee37001303df901cca8f9b306156128d4e1644d9b: Status 404 returned error can't find the container with id 071800add687bee8c66db79ee37001303df901cca8f9b306156128d4e1644d9b Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.603842 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.610821 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 22:59:25 crc kubenswrapper[5045]: W1125 22:59:25.629287 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9143958_692e_41d3_970d_ffdd160f8524.slice/crio-4af2b0e0cf89596e0817a260133772496e4210713daa01c12eb3fea5ae84deed WatchSource:0}: Error finding container 4af2b0e0cf89596e0817a260133772496e4210713daa01c12eb3fea5ae84deed: Status 404 returned error can't find the container with id 4af2b0e0cf89596e0817a260133772496e4210713daa01c12eb3fea5ae84deed Nov 25 22:59:25 crc kubenswrapper[5045]: W1125 22:59:25.631195 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07acb2e0_1638_4174_8f01_b08385fca2dc.slice/crio-677fec445daca99921e869f9545ef15bd5da33ca29ce3371c1160fc3ad801555 WatchSource:0}: Error finding container 677fec445daca99921e869f9545ef15bd5da33ca29ce3371c1160fc3ad801555: Status 404 returned error can't find the container with id 677fec445daca99921e869f9545ef15bd5da33ca29ce3371c1160fc3ad801555 Nov 25 22:59:25 crc kubenswrapper[5045]: W1125 22:59:25.644004 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbc394db7_8b38_4abe_841d_83a3ea3d07b0.slice/crio-21a6731018ff81510ec88e5575a5a81cceca3fa3c9eb65c28bd23e0dfbbf8022 WatchSource:0}: Error finding container 21a6731018ff81510ec88e5575a5a81cceca3fa3c9eb65c28bd23e0dfbbf8022: Status 404 returned error can't find the container with id 21a6731018ff81510ec88e5575a5a81cceca3fa3c9eb65c28bd23e0dfbbf8022 Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.668932 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mrsr4"] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.669927 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.673205 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.673207 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.673616 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.673889 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.674012 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.674214 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.674401 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.686573 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.697467 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.706544 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.719876 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782276 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782415 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782453 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-netns\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782486 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-etc-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782507 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-kubelet\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.782540 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 22:59:27.782504528 +0000 UTC m=+24.140163650 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782604 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-log-socket\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782652 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-script-lib\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.782692 5045 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.782781 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:27.782760985 +0000 UTC m=+24.140420307 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782692 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-systemd-units\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782840 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lg5r\" (UniqueName: \"kubernetes.io/projected/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-kube-api-access-8lg5r\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782887 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-config\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782914 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-ovn\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.782982 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-netd\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.783211 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.783355 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-node-log\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.783593 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-ovn-kubernetes\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 
22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.783704 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-slash\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.783832 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-systemd\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.783899 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-bin\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.783974 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.784058 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-var-lib-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.784139 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-env-overrides\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.784210 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovn-node-metrics-cert\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.784294 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.784428 5045 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.784469 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:27.784460433 +0000 UTC m=+24.142119545 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.785959 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.799310 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.825981 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.839511 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.848180 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.872227 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885209 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-ovn\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885251 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-config\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885274 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-netd\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885297 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885315 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-node-log\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885336 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-ovn-kubernetes\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885348 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-ovn\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc 
kubenswrapper[5045]: I1125 22:59:25.885358 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885383 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-slash\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885409 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-netd\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885472 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-node-log\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885483 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-slash\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885518 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-bin\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885511 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-ovn-kubernetes\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885554 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885601 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-bin\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.885566 5045 projected.go:288] Couldn't get configMap 
openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885625 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.885654 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885667 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-systemd\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.885677 5045 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885695 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885697 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovn-node-metrics-cert\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.885753 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:27.885732798 +0000 UTC m=+24.243391910 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885755 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-systemd\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885780 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-var-lib-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885812 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-var-lib-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885834 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-env-overrides\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885878 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885939 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-netns\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885960 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-etc-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.885982 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-kubelet\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc 
kubenswrapper[5045]: I1125 22:59:25.886003 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-log-socket\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.886050 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.886070 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886042 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-script-lib\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.886082 5045 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886099 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-systemd-units\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: E1125 22:59:25.886126 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:27.886111279 +0000 UTC m=+24.243770391 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886131 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-systemd-units\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886146 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lg5r\" (UniqueName: \"kubernetes.io/projected/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-kube-api-access-8lg5r\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886159 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-netns\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886191 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-etc-openvswitch\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886188 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-kubelet\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886336 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-env-overrides\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886354 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-config\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886603 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-log-socket\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.886758 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-script-lib\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.891458 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovn-node-metrics-cert\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.899085 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.910538 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lg5r\" (UniqueName: \"kubernetes.io/projected/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-kube-api-access-8lg5r\") pod \"ovnkube-node-mrsr4\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.918634 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.933269 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:25 crc kubenswrapper[5045]: I1125 22:59:25.982499 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:25 crc kubenswrapper[5045]: W1125 22:59:25.997060 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f81194f_4d48_4be6_9f73_8b34ed6b56cc.slice/crio-4e06634f4f6504206e000ccc570179980ad5ad973bbf9854a7530a095b0232a4 WatchSource:0}: Error finding container 4e06634f4f6504206e000ccc570179980ad5ad973bbf9854a7530a095b0232a4: Status 404 returned error can't find the container with id 4e06634f4f6504206e000ccc570179980ad5ad973bbf9854a7530a095b0232a4 Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.396262 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.396311 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:26 crc kubenswrapper[5045]: E1125 22:59:26.396408 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.396443 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:26 crc kubenswrapper[5045]: E1125 22:59:26.396649 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:26 crc kubenswrapper[5045]: E1125 22:59:26.396769 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.599359 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e" exitCode=0 Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.599450 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.599513 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"4e06634f4f6504206e000ccc570179980ad5ad973bbf9854a7530a095b0232a4"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.602238 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerStarted","Data":"946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.602290 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerStarted","Data":"677fec445daca99921e869f9545ef15bd5da33ca29ce3371c1160fc3ad801555"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.605680 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.607703 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.609652 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-lbrq8" event={"ID":"a9143958-692e-41d3-970d-ffdd160f8524","Type":"ContainerStarted","Data":"6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.609744 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-lbrq8" event={"ID":"a9143958-692e-41d3-970d-ffdd160f8524","Type":"ContainerStarted","Data":"4af2b0e0cf89596e0817a260133772496e4210713daa01c12eb3fea5ae84deed"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.612650 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.612699 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" 
event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"21a6731018ff81510ec88e5575a5a81cceca3fa3c9eb65c28bd23e0dfbbf8022"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.615836 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-ht6dm" event={"ID":"e971a47d-97d5-4a21-a255-2497b2b3cbbc","Type":"ContainerStarted","Data":"69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.615894 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-ht6dm" event={"ID":"e971a47d-97d5-4a21-a255-2497b2b3cbbc","Type":"ContainerStarted","Data":"071800add687bee8c66db79ee37001303df901cca8f9b306156128d4e1644d9b"} Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.617276 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e000
8a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.638743 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.657571 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.669670 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.683622 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.696475 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.706869 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.721770 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.732797 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.747507 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.758806 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.774430 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.804915 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.827639 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.844454 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.862203 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.879337 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.896337 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.914288 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.928570 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.943784 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.962060 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-over
rides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.974004 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:26 crc kubenswrapper[5045]: I1125 22:59:26.985552 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\
":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.007957 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.017982 
5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.631391 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957"} Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.634012 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9"} Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.638184 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef"} Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.638235 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4"} Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.638245 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f"} Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.638258 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc"} Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.640617 5045 generic.go:334] "Generic (PLEG): container finished" podID="07acb2e0-1638-4174-8f01-b08385fca2dc" 
containerID="946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb" exitCode=0 Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.640968 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerDied","Data":"946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb"} Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.649884 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.665162 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.703466 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z 
is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.724120 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.743933 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.762476 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.780840 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.800299 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.809730 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.809911 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.809941 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.810020 5045 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.810072 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:31.810055266 +0000 UTC m=+28.167714388 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.810134 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 22:59:31.810124598 +0000 UTC m=+28.167783720 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.810204 5045 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.810242 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:31.810233571 +0000 UTC m=+28.167892693 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.815281 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.833195 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.852153 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.867280 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.884031 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc 
kubenswrapper[5045]: I1125 22:59:27.898784 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.910263 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.910587 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.910639 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.910782 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.910805 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.910820 5045 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.910819 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.910842 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.910858 5045 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, 
object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.910866 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:31.910849419 +0000 UTC m=+28.268508531 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:27 crc kubenswrapper[5045]: E1125 22:59:27.910912 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:31.91089454 +0000 UTC m=+28.268553662 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.923001 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.936908 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.950774 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.969933 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:27 crc kubenswrapper[5045]: I1125 22:59:27.985106 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:27Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.004902 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc 
kubenswrapper[5045]: I1125 22:59:28.017381 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.030274 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.050662 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z 
is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.062272 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.074043 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.372015 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-xmmf5"] Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.372385 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.374638 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.374869 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.376595 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.379072 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.390943 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 
22:59:28.397059 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.397130 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.397196 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:28 crc kubenswrapper[5045]: E1125 22:59:28.397216 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:28 crc kubenswrapper[5045]: E1125 22:59:28.397304 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:28 crc kubenswrapper[5045]: E1125 22:59:28.397383 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.416797 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.426324 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99l7t\" (UniqueName: \"kubernetes.io/projected/1daf0409-a84d-47cc-9a90-7283ae93fced-kube-api-access-99l7t\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.426375 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1daf0409-a84d-47cc-9a90-7283ae93fced-serviceca\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.426407 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1daf0409-a84d-47cc-9a90-7283ae93fced-host\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.431519 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.442311 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.458907 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.473318 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.487901 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.504860 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.519307 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.527674 5045 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1daf0409-a84d-47cc-9a90-7283ae93fced-serviceca\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.527744 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1daf0409-a84d-47cc-9a90-7283ae93fced-host\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.527822 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99l7t\" (UniqueName: \"kubernetes.io/projected/1daf0409-a84d-47cc-9a90-7283ae93fced-kube-api-access-99l7t\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.527999 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1daf0409-a84d-47cc-9a90-7283ae93fced-host\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.528804 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1daf0409-a84d-47cc-9a90-7283ae93fced-serviceca\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.541769 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc 
kubenswrapper[5045]: I1125 22:59:28.562121 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99l7t\" (UniqueName: \"kubernetes.io/projected/1daf0409-a84d-47cc-9a90-7283ae93fced-kube-api-access-99l7t\") pod \"node-ca-xmmf5\" (UID: \"1daf0409-a84d-47cc-9a90-7283ae93fced\") " pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.570381 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.584926 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.599906 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.631577 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.649566 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed"} Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.649622 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5"} Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.651951 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerStarted","Data":"8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479"} Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.665055 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.675765 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.689244 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.691840 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-xmmf5" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.706182 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: W1125 22:59:28.706509 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1daf0409_a84d_47cc_9a90_7283ae93fced.slice/crio-6904d6e3fe0cd80a605841bb583c1f0a65229c15c2e84173bbf52eeec59cb0da WatchSource:0}: Error finding container 6904d6e3fe0cd80a605841bb583c1f0a65229c15c2e84173bbf52eeec59cb0da: Status 404 returned error can't find the container with id 6904d6e3fe0cd80a605841bb583c1f0a65229c15c2e84173bbf52eeec59cb0da Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.723490 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.745435 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.759038 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.777142 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.791593 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.806817 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.829899 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z 
is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.848249 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.864116 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:28 crc kubenswrapper[5045]: I1125 22:59:28.882426 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:28Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.657694 5045 generic.go:334] "Generic (PLEG): container finished" podID="07acb2e0-1638-4174-8f01-b08385fca2dc" containerID="8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479" exitCode=0 Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.657812 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerDied","Data":"8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479"} Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.660663 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-xmmf5" event={"ID":"1daf0409-a84d-47cc-9a90-7283ae93fced","Type":"ContainerStarted","Data":"919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049"} Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.660750 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-xmmf5" event={"ID":"1daf0409-a84d-47cc-9a90-7283ae93fced","Type":"ContainerStarted","Data":"6904d6e3fe0cd80a605841bb583c1f0a65229c15c2e84173bbf52eeec59cb0da"} Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.676914 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.691846 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\"
:\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.704673 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.722743 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.746374 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z 
is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.767909 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.784294 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.797348 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.812567 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.828973 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.842939 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.855692 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.872737 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.897113 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.912828 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.929211 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.942745 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.956127 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.970897 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:29 crc kubenswrapper[5045]: I1125 22:59:29.988313 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:29Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.011440 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.028185 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\
\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.060256 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.084390 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.108308 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z 
is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.120691 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.134263 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.145069 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.396543 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.396658 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.396768 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.396808 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.396938 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.397081 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.499498 5045 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.501735 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.501779 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.501794 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.501944 5045 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.512681 5045 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.512961 5045 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.514078 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.514105 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.514113 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.514128 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.514138 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.538397 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.543235 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.543280 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.543291 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.543312 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.543325 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.556509 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.561127 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.561185 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.561196 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.561224 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.561237 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.577250 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.582877 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.582919 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.582932 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.582951 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.582963 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.595812 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.600610 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.600651 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.600664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.600685 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.600698 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.616947 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: E1125 22:59:30.617111 5045 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.619338 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.619390 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.619409 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.619435 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.619455 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.667551 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79"} Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.671192 5045 generic.go:334] "Generic (PLEG): container finished" podID="07acb2e0-1638-4174-8f01-b08385fca2dc" containerID="a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58" exitCode=0 Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.671267 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerDied","Data":"a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58"} Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.687950 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.702581 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.717519 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.721982 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.722023 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.722032 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.722048 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.722059 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.731296 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.746567 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.761763 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.776834 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.789378 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"202
5-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.802308 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-override
s\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.814669 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.827911 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.827958 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.827975 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.827995 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.828009 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.838435 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.863580 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z 
is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.879012 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.889488 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:30Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.930418 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.930464 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.930472 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.930488 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:30 crc kubenswrapper[5045]: I1125 22:59:30.930498 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:30Z","lastTransitionTime":"2025-11-25T22:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.032742 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.032785 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.032838 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.032861 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.032873 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.136599 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.136657 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.136678 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.136705 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.136754 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.240308 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.240379 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.240397 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.240427 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.240453 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.343648 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.343691 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.343741 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.343767 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.343783 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.447577 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.447645 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.447664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.447694 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.447749 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.552057 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.552118 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.552137 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.552164 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.552184 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.655441 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.655478 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.655490 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.655505 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.655515 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.683816 5045 generic.go:334] "Generic (PLEG): container finished" podID="07acb2e0-1638-4174-8f01-b08385fca2dc" containerID="2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf" exitCode=0 Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.683904 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerDied","Data":"2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.705578 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.708544 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.727343 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.754859 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.759258 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.759317 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.759332 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.759353 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.759367 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.775003 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.791082 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.805973 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.831375 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.846175 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.863028 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.863437 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.863882 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.864083 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.864285 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.864479 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.868300 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.868455 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.868489 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.869131 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 22:59:39.869110925 +0000 UTC m=+36.226770037 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.869271 5045 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.869276 5045 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.869351 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:39.869332751 +0000 UTC m=+36.226991863 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.869394 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:39.869361632 +0000 UTC m=+36.227020904 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.880326 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.900760 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.923888 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z 
is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.936800 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.946704 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.967287 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.967345 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.967363 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.967391 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.967410 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:31Z","lastTransitionTime":"2025-11-25T22:59:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.968083 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.970142 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.970251 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.970411 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.970457 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.970477 5045 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.970533 5045 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:39.970515575 +0000 UTC m=+36.328174707 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.970411 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.970595 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.970614 5045 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:31 crc kubenswrapper[5045]: E1125 22:59:31.970696 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:39.97067723 +0000 UTC m=+36.328336382 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.981620 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:31 crc kubenswrapper[5045]: I1125 22:59:31.992981 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:31Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.039588 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.069948 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.070328 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.070372 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.070385 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.070409 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.070422 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.089991 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.105911 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.117470 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.131943 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.141770 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.152239 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.163739 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.172438 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.172495 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.172507 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.172526 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.172537 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.178096 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.198646 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:32Z 
is after 2025-08-24T17:21:41Z" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.275774 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.275833 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.275853 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.275883 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.275904 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.380242 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.380311 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.380333 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.380359 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.380373 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.395846 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.395865 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:32 crc kubenswrapper[5045]: E1125 22:59:32.396060 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.396166 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:32 crc kubenswrapper[5045]: E1125 22:59:32.396367 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:32 crc kubenswrapper[5045]: E1125 22:59:32.396552 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.484516 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.484573 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.484594 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.484620 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.484639 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.588401 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.588463 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.588481 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.588509 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.588529 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.692848 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.692899 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.692917 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.692940 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.693369 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.696697 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerStarted","Data":"152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc"} Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.797197 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.797248 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.797268 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.797294 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.797315 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.902322 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.903163 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.904080 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.904888 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:32 crc kubenswrapper[5045]: I1125 22:59:32.905266 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:32Z","lastTransitionTime":"2025-11-25T22:59:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.008411 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.008463 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.008481 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.008507 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.008527 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.111865 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.111924 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.111938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.111960 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.111973 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.216426 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.216501 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.216527 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.216564 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.216582 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.320055 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.320121 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.320140 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.320171 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.320192 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.423468 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.423530 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.423543 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.423563 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.423577 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.526577 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.526637 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.526657 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.526685 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.526705 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.628956 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.629011 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.629023 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.629042 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.629055 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.704852 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352"} Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.705589 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.722688 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.731994 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.732039 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.732050 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.732070 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:33 crc 
kubenswrapper[5045]: I1125 22:59:33.732082 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.742961 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.763635 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.780068 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.786186 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.797873 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\"
:{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.814465 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.834010 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.834930 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.835020 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.835045 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.835080 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.835106 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.849796 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.874463 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.893037 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.920624 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.938249 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.938285 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.938298 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.938319 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.938334 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:33Z","lastTransitionTime":"2025-11-25T22:59:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.950556 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.969498 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:33 crc kubenswrapper[5045]: I1125 22:59:33.985984 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:33Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.005702 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.029202 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.042103 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.042157 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.042175 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.042201 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.042220 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.047438 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.068154 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.086046 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.116617 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.143108 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.145348 5045 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.154253 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.159437 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.159463 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.159477 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.175882 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.195941 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77
3257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.212439 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.231105 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.261499 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.261531 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.261541 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.261558 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.261570 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.264765 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkub
e-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.274267 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.288636 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.365174 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.365218 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.365230 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.365251 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.365263 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.395890 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.395939 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.395954 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:34 crc kubenswrapper[5045]: E1125 22:59:34.396131 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:34 crc kubenswrapper[5045]: E1125 22:59:34.396490 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:34 crc kubenswrapper[5045]: E1125 22:59:34.396565 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.438273 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\
\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.455785 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.467771 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.467824 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.467855 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.467871 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.467881 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.470485 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.484692 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.500507 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.517135 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.528944 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.539417 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.555702 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\
\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\
"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.566591 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.576885 5045 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.576953 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.576973 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.577001 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.577019 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.584033 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.599860 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.612187 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.629280 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a5393
73acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.679091 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.679121 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.679129 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.679143 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.679153 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.712074 5045 generic.go:334] "Generic (PLEG): container finished" podID="07acb2e0-1638-4174-8f01-b08385fca2dc" containerID="152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc" exitCode=0 Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.712152 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerDied","Data":"152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.714127 5045 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.714660 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.731233 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\
\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.746811 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.748364 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.762485 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},
{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.781681 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.781736 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.781778 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.781799 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.781809 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.785701 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkub
e-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.797684 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.809065 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.819507 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.837467 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.852560 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.868883 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.884340 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.884378 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.884386 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.884400 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.884410 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.886486 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.902664 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.917458 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.929728 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.944083 5045 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.961337 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountP
ath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.975959 5045 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.987517 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.987568 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 
22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.987590 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.987616 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.987632 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:34Z","lastTransitionTime":"2025-11-25T22:59:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:34 crc kubenswrapper[5045]: I1125 22:59:34.993068 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:34Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.006925 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\
\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.026155 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.039332 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.055913 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.067383 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.084095 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\
\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.090404 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.090481 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.090507 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.090540 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.090565 5045 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.106010 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.121213 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.145961 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.170515 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.194185 5045 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.194228 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.194239 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.194255 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.194266 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.297323 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.297748 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.297889 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.298076 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.298220 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.401010 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.401085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.401111 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.401146 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.401170 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.504351 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.504906 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.505084 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.505228 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.505347 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.608555 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.608621 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.608634 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.608654 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.608668 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.711586 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.711648 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.711661 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.711682 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.711695 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.724386 5045 generic.go:334] "Generic (PLEG): container finished" podID="07acb2e0-1638-4174-8f01-b08385fca2dc" containerID="dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0" exitCode=0 Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.724482 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerDied","Data":"dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.724569 5045 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.745581 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.766749 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.787935 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.805522 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.814414 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.814449 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.814458 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.814474 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.814486 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.819954 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.837009 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.853230 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.865301 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.886999 5045 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.899737 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.917496 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.921555 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.921604 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.921619 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.921643 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.921663 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:35Z","lastTransitionTime":"2025-11-25T22:59:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.942283 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a5393
73acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.961570 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:35 crc kubenswrapper[5045]: I1125 22:59:35.974754 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:35Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.024757 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.024806 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.024819 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.024840 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.024853 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.128615 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.128685 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.128706 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.128768 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.128794 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.233349 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.233836 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.233848 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.233873 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.233885 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.337186 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.337255 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.337274 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.337302 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.337325 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.397971 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.398050 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:36 crc kubenswrapper[5045]: E1125 22:59:36.398175 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.398238 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:36 crc kubenswrapper[5045]: E1125 22:59:36.398347 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:36 crc kubenswrapper[5045]: E1125 22:59:36.398574 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.440317 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.440391 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.440411 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.440441 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.440465 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.543436 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.543478 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.543489 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.543508 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.543519 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.647052 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.647121 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.647143 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.647172 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.647193 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.732894 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" event={"ID":"07acb2e0-1638-4174-8f01-b08385fca2dc","Type":"ContainerStarted","Data":"5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.733078 5045 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.750171 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.750245 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.750264 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.750298 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.750319 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.758285 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.772223 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 
22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.795992 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.820931 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.837363 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.853621 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.853693 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.853724 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.853749 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.853765 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.863351 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a5393
73acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.890208 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.914490 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.976505 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.976538 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.976549 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.976566 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.976578 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:36Z","lastTransitionTime":"2025-11-25T22:59:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:36 crc kubenswrapper[5045]: I1125 22:59:36.981541 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.003610 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:36Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.017471 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:37Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.046061 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:37Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.070142 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:37Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.079091 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.079139 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.079148 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.079165 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.079177 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.084472 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:37Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.181758 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.181817 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.181827 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.181846 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.181858 5045 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.284236 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.284287 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.284295 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.284310 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.284321 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.387642 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.387709 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.387795 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.387826 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.387850 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.491271 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.491355 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.491376 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.491396 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.491407 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.593761 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.593806 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.593815 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.593831 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.593845 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.697074 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.697186 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.697205 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.697228 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.697247 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.800318 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.800384 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.800403 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.800431 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.800451 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.904438 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.904511 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.904528 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.904554 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:37 crc kubenswrapper[5045]: I1125 22:59:37.904572 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:37Z","lastTransitionTime":"2025-11-25T22:59:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.008061 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.008499 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.008529 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.008556 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.008575 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.111762 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.111848 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.111874 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.111911 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.111936 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.215619 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.215696 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.215758 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.215798 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.215823 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.234925 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c"] Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.235794 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.238699 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.239023 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.266152 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.294388 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5
ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"st
artTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.314896 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.319635 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.319705 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.319759 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.319793 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.319821 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.337099 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.342536 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e8fd492c-b76d-46cd-a320-eff22476cb6e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.342615 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e8fd492c-b76d-46cd-a320-eff22476cb6e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.342676 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e8fd492c-b76d-46cd-a320-eff22476cb6e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.343556 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7fzd\" (UniqueName: \"kubernetes.io/projected/e8fd492c-b76d-46cd-a320-eff22476cb6e-kube-api-access-x7fzd\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.369688 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.395835 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.395869 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.395893 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:38 crc kubenswrapper[5045]: E1125 22:59:38.396286 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:38 crc kubenswrapper[5045]: E1125 22:59:38.396065 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:38 crc kubenswrapper[5045]: E1125 22:59:38.396537 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.410771 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"
readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"}
,{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.424015 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.424203 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.424287 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.424382 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.424492 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.431186 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.444607 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e8fd492c-b76d-46cd-a320-eff22476cb6e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.444679 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e8fd492c-b76d-46cd-a320-eff22476cb6e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.444780 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e8fd492c-b76d-46cd-a320-eff22476cb6e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.444828 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7fzd\" (UniqueName: \"kubernetes.io/projected/e8fd492c-b76d-46cd-a320-eff22476cb6e-kube-api-access-x7fzd\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.445989 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e8fd492c-b76d-46cd-a320-eff22476cb6e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.446250 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e8fd492c-b76d-46cd-a320-eff22476cb6e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.453502 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.455696 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/e8fd492c-b76d-46cd-a320-eff22476cb6e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.478406 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7fzd\" (UniqueName: \"kubernetes.io/projected/e8fd492c-b76d-46cd-a320-eff22476cb6e-kube-api-access-x7fzd\") pod \"ovnkube-control-plane-749d76644c-6rq7c\" (UID: \"e8fd492c-b76d-46cd-a320-eff22476cb6e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.481211 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/
openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.504763 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.526071 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.528794 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.528883 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.528905 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.529278 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.529329 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.547937 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.563478 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.570620 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.590058 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: W1125 22:59:38.593012 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8fd492c_b76d_46cd_a320_eff22476cb6e.slice/crio-bbcd3739dd18460280f33e28b652ce0ee3e2ec5efcfc381d3ac9a25b32c61103 WatchSource:0}: Error finding container bbcd3739dd18460280f33e28b652ce0ee3e2ec5efcfc381d3ac9a25b32c61103: Status 404 returned error can't find the container with id bbcd3739dd18460280f33e28b652ce0ee3e2ec5efcfc381d3ac9a25b32c61103 Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.617015 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.633357 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.633432 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.633451 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.633479 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.633498 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.737231 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.737288 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.737301 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.737323 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.737338 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.746451 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/0.log" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.748523 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352" exitCode=1 Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.748596 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.749385 5045 scope.go:117] "RemoveContainer" containerID="0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.754454 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" event={"ID":"e8fd492c-b76d-46cd-a320-eff22476cb6e","Type":"ContainerStarted","Data":"bbcd3739dd18460280f33e28b652ce0ee3e2ec5efcfc381d3ac9a25b32c61103"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.769958 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/h
ost/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.793791 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a5393
73acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"troller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:37.776159 6310 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776664 6310 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776880 6310 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 22:59:37.777307 6310 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:37.777347 6310 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 22:59:37.777450 6310 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 22:59:37.777484 6310 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:37.777496 6310 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 22:59:37.777513 6310 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:37.777524 6310 factory.go:656] Stopping watch factory\\\\nI1125 22:59:37.777553 6310 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 22:59:37.777566 6310 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:37.777572 6310 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:37.777574 6310 handler.go:208] Removed *v1.NetworkPolicy event handler 
4\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.812557 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.829440 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.840890 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.840961 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.840982 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.841009 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.841028 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.845967 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.861106 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.874692 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.890049 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.908474 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.924544 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.941474 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\
\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.948266 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.948340 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.948356 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.948379 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.948402 5045 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:38Z","lastTransitionTime":"2025-11-25T22:59:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.961092 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.979739 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:38 crc kubenswrapper[5045]: I1125 22:59:38.997616 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:38Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.014646 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.050829 5045 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.050901 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.050920 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.050949 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.051011 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.154663 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.154744 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.154756 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.154783 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.154797 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.258501 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.258573 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.258592 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.258622 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.258641 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.361698 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.361805 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.361828 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.361860 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.361879 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.387390 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-9rjvw"] Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.388359 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:39 crc kubenswrapper[5045]: E1125 22:59:39.388530 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.412299 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.425512 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.436048 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.451532 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.459318 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr8t2\" (UniqueName: \"kubernetes.io/projected/9e044b50-b07a-44a0-b69f-45fd4392de24-kube-api-access-nr8t2\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.459480 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.469040 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.469085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.469097 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.469119 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.469132 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.470572 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.487174 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.502005 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.519149 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.534006 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.547480 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.560506 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr8t2\" (UniqueName: \"kubernetes.io/projected/9e044b50-b07a-44a0-b69f-45fd4392de24-kube-api-access-nr8t2\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.560557 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:39 crc kubenswrapper[5045]: E1125 22:59:39.560797 5045 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:39 crc kubenswrapper[5045]: E1125 22:59:39.560881 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs podName:9e044b50-b07a-44a0-b69f-45fd4392de24 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:40.060853578 +0000 UTC m=+36.418512700 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs") pod "network-metrics-daemon-9rjvw" (UID: "9e044b50-b07a-44a0-b69f-45fd4392de24") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.568805 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.571845 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.572068 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.572192 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.572349 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.572450 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.583493 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81f
d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.590878 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr8t2\" (UniqueName: \"kubernetes.io/projected/9e044b50-b07a-44a0-b69f-45fd4392de24-kube-api-access-nr8t2\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.608679 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.637395 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06
c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"troller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:37.776159 6310 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776664 6310 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776880 6310 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 22:59:37.777307 6310 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:37.777347 6310 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 22:59:37.777450 6310 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 22:59:37.777484 6310 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:37.777496 6310 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 22:59:37.777513 6310 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:37.777524 6310 factory.go:656] Stopping watch factory\\\\nI1125 22:59:37.777553 6310 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 22:59:37.777566 6310 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:37.777572 6310 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:37.777574 6310 handler.go:208] Removed *v1.NetworkPolicy event handler 
4\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.657020 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.675969 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.676028 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.676052 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.676085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.676140 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.681974 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:39Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.761275 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" event={"ID":"e8fd492c-b76d-46cd-a320-eff22476cb6e","Type":"ContainerStarted","Data":"804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.778950 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.779045 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.779072 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.779106 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.779131 5045 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.882938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.883009 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.883033 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.883064 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.883085 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.965471 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:39 crc kubenswrapper[5045]: E1125 22:59:39.965742 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 22:59:55.965684747 +0000 UTC m=+52.323343899 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.965845 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.965911 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:39 crc kubenswrapper[5045]: E1125 22:59:39.966017 5045 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:39 crc kubenswrapper[5045]: E1125 22:59:39.966083 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:55.966068138 +0000 UTC m=+52.323727280 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:39 crc kubenswrapper[5045]: E1125 22:59:39.966188 5045 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:39 crc kubenswrapper[5045]: E1125 22:59:39.966372 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:55.966298754 +0000 UTC m=+52.323957906 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.986761 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.986814 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.986832 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.986858 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:39 crc kubenswrapper[5045]: I1125 22:59:39.986877 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:39Z","lastTransitionTime":"2025-11-25T22:59:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.066943 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.067068 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.067124 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067253 5045 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067303 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067341 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067366 5045 projected.go:194] Error preparing data for 
projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067366 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs podName:9e044b50-b07a-44a0-b69f-45fd4392de24 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:41.067333884 +0000 UTC m=+37.424993026 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs") pod "network-metrics-daemon-9rjvw" (UID: "9e044b50-b07a-44a0-b69f-45fd4392de24") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067426 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067450 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:56.067426227 +0000 UTC m=+52.425085369 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067461 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067485 5045 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.067541 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:56.06752407 +0000 UTC m=+52.425183222 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.093633 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.093774 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.093807 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.093844 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.093874 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.196315 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.196363 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.196376 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.196400 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.196415 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.299783 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.299851 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.299869 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.299898 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.299918 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.396554 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.396669 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.396780 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.396559 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.396919 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.397120 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.402519 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.402599 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.402619 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.402651 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.402676 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.506274 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.506314 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.506324 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.506340 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.506350 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.609242 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.609275 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.609284 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.609301 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.609313 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.711874 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.711926 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.711942 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.711975 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.711988 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.767768 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/0.log" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.771359 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.771598 5045 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.773449 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" event={"ID":"e8fd492c-b76d-46cd-a320-eff22476cb6e","Type":"ContainerStarted","Data":"aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.791284 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.805653 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.817597 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.821622 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.821678 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.821690 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.821735 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.821750 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.830108 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.841420 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.854126 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.871002 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.889841 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\
\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o
://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mount
Path\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.907685 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\
\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.925279 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.925353 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.925371 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.925398 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.925418 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.928803 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.958447 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.963452 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.963680 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.963797 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.963887 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.963951 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.983031 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.985702 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.987938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.987991 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.988009 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.988035 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:40 crc kubenswrapper[5045]: I1125 22:59:40.988055 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:40Z","lastTransitionTime":"2025-11-25T22:59:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:40 crc kubenswrapper[5045]: E1125 22:59:40.999496 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:40Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.002577 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.002612 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.002624 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.002647 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.002664 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.008787 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6
dfebbc62ebc35048671ab84e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"troller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:37.776159 6310 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776664 6310 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776880 6310 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 22:59:37.777307 6310 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:37.777347 6310 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 22:59:37.777450 6310 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 22:59:37.777484 6310 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:37.777496 6310 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 22:59:37.777513 6310 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:37.777524 6310 factory.go:656] Stopping watch factory\\\\nI1125 22:59:37.777553 6310 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 22:59:37.777566 6310 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:37.777572 6310 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:37.777574 6310 handler.go:208] Removed *v1.NetworkPolicy event handler 
4\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: E1125 22:59:41.016379 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.021185 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.021220 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.021232 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.021252 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.021266 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.023755 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: E1125 22:59:41.034095 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f
06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.038915 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.038987 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.039004 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.039026 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.039047 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.041633 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: E1125 22:59:41.054798 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: E1125 22:59:41.055222 5045 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.057159 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.057210 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.057225 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.057246 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.057261 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.061888 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.079897 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:41 crc kubenswrapper[5045]: E1125 22:59:41.080067 5045 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:41 crc kubenswrapper[5045]: E1125 22:59:41.080205 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs podName:9e044b50-b07a-44a0-b69f-45fd4392de24 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:43.080184512 +0000 UTC m=+39.437843624 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs") pod "network-metrics-daemon-9rjvw" (UID: "9e044b50-b07a-44a0-b69f-45fd4392de24") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.088521 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6
dfebbc62ebc35048671ab84e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"troller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:37.776159 6310 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776664 6310 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776880 6310 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 22:59:37.777307 6310 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:37.777347 6310 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 22:59:37.777450 6310 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 22:59:37.777484 6310 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:37.777496 6310 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 22:59:37.777513 6310 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:37.777524 6310 factory.go:656] Stopping watch factory\\\\nI1125 22:59:37.777553 6310 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 22:59:37.777566 6310 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:37.777572 6310 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:37.777574 6310 handler.go:208] Removed *v1.NetworkPolicy event handler 
4\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.102830 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.118544 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.133401 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.148478 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.161363 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.161730 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.161771 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.161783 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.161805 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.161817 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.174067 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.187467 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.198014 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.213292 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.229857 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.249568 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.264963 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.265060 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.265087 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.265123 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.265149 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.270192 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.288779 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.306942 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.324213 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.368173 5045 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.368242 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.368262 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.368290 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.368309 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.395633 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:41 crc kubenswrapper[5045]: E1125 22:59:41.395878 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.477884 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.477956 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.477989 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.478023 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.478045 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.583616 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.583664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.583674 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.583695 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.583739 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.687090 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.687153 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.687168 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.687194 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.687215 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.791117 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.791177 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.791190 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.791213 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.791228 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.792608 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/1.log" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.793489 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/0.log" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.797766 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e" exitCode=1 Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.797883 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.798006 5045 scope.go:117] "RemoveContainer" containerID="0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.798984 5045 scope.go:117] "RemoveContainer" containerID="50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e" Nov 25 22:59:41 crc kubenswrapper[5045]: E1125 22:59:41.799260 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.822984 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.840343 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.865213 5045 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.889266 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.895265 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.895352 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.895381 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.895449 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.895494 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.911839 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.947186 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d58776fc9b1c9c02cbe458c04e2ddee299a539373acbdf49512c328c3a20352\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"message\\\":\\\"troller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:37.776159 6310 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776664 6310 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:37.776880 6310 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 22:59:37.777307 6310 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:37.777347 6310 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 22:59:37.777450 6310 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 22:59:37.777484 6310 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:37.777496 6310 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 22:59:37.777513 6310 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:37.777524 6310 factory.go:656] Stopping watch factory\\\\nI1125 22:59:37.777553 6310 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 22:59:37.777566 6310 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:37.777572 6310 handler.go:208] Removed *v1.Namespace event handler 
1\\\\nI1125 22:59:37.777574 6310 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\" reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056391 6505 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056624 6505 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056903 6505 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.057003 6505 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:41.057747 6505 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:41.057848 6505 factory.go:656] Stopping watch factory\\\\nI1125 22:59:41.057876 6505 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:41.061029 6505 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 22:59:41.061058 6505 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 22:59:41.061115 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:41.061145 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:41.061227 6505 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.970198 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.994820 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:41Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.998314 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.998357 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.998369 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.998397 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:41 crc kubenswrapper[5045]: I1125 22:59:41.998410 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:41Z","lastTransitionTime":"2025-11-25T22:59:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.014615 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:42Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.031677 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:42Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.050152 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:42Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.069576 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:42Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.087468 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:42Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.101674 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.101739 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.101750 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.101772 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.101790 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.102355 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:42Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.114447 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:42Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.134909 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:42Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.204924 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.204985 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.205003 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.205029 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.205042 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.308486 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.308832 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.308922 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.308993 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.309068 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.395851 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.395927 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:42 crc kubenswrapper[5045]: E1125 22:59:42.396042 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:42 crc kubenswrapper[5045]: E1125 22:59:42.396454 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.396203 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:42 crc kubenswrapper[5045]: E1125 22:59:42.396830 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.413490 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.414006 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.414160 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.414300 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.414436 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.521177 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.521600 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.521830 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.521982 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.522118 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.625419 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.625490 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.625515 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.625547 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.625574 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.729931 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.730010 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.730032 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.730059 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.730079 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.806975 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/1.log" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.834195 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.834247 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.834265 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.834288 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.834307 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.937306 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.937360 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.937377 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.937400 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:42 crc kubenswrapper[5045]: I1125 22:59:42.937418 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:42Z","lastTransitionTime":"2025-11-25T22:59:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.041484 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.041535 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.041553 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.041578 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.041600 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.103531 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:43 crc kubenswrapper[5045]: E1125 22:59:43.103889 5045 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:43 crc kubenswrapper[5045]: E1125 22:59:43.104354 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs podName:9e044b50-b07a-44a0-b69f-45fd4392de24 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:47.104309465 +0000 UTC m=+43.461968587 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs") pod "network-metrics-daemon-9rjvw" (UID: "9e044b50-b07a-44a0-b69f-45fd4392de24") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.145438 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.145506 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.145524 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.145550 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.145570 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.248909 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.249404 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.249449 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.249476 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.249496 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.353406 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.353459 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.353472 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.353494 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.353510 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.366916 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.367877 5045 scope.go:117] "RemoveContainer" containerID="50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e" Nov 25 22:59:43 crc kubenswrapper[5045]: E1125 22:59:43.368189 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.393744 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 
22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.395991 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:43 crc kubenswrapper[5045]: E1125 22:59:43.396437 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.414459 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-o
perator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.431568 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.456872 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.457257 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.457388 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.457527 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.457525 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.457667 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.477793 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.499689 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.520155 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.538084 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.556378 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.560756 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.560810 
5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.560829 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.560857 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.560877 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.578541 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:0
7Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.600837 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.624156 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.644097 5045 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.664613 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.664674 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 
crc kubenswrapper[5045]: I1125 22:59:43.664691 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.664748 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.664767 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.665753 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.682975 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.715594 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6
dfebbc62ebc35048671ab84e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\" reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056391 6505 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056624 6505 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056903 6505 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.057003 6505 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:41.057747 6505 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:41.057848 6505 factory.go:656] Stopping watch factory\\\\nI1125 22:59:41.057876 6505 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:41.061029 6505 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 22:59:41.061058 6505 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 22:59:41.061115 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:41.061145 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:41.061227 6505 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:43Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.768038 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.768286 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.768476 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.768708 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.768959 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.872394 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.872473 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.872493 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.872526 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.872549 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.976051 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.976120 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.976133 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.976157 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:43 crc kubenswrapper[5045]: I1125 22:59:43.976175 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:43Z","lastTransitionTime":"2025-11-25T22:59:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.079509 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.079571 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.079587 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.079612 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.079631 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.182841 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.182940 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.182955 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.182978 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.182992 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.287015 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.287080 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.287093 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.287116 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.287130 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.391143 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.391529 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.391657 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.391879 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.392008 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.396173 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.396199 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.396233 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:44 crc kubenswrapper[5045]: E1125 22:59:44.396491 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:44 crc kubenswrapper[5045]: E1125 22:59:44.396851 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:44 crc kubenswrapper[5045]: E1125 22:59:44.397004 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.417691 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.432665 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.448873 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 
22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.468646 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.488377 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.494489 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.494563 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.494592 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.494630 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.494658 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.508179 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.526135 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.542392 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.582550 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.597657 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.597952 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.598039 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.598169 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.598248 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.610745 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.644575 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.660205 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.678149 5045 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.693968 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.704542 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.704608 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.704624 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.704647 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.704664 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.717349 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6
dfebbc62ebc35048671ab84e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\" reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056391 6505 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056624 6505 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056903 6505 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.057003 6505 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:41.057747 6505 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:41.057848 6505 factory.go:656] Stopping watch factory\\\\nI1125 22:59:41.057876 6505 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:41.061029 6505 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 22:59:41.061058 6505 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 22:59:41.061115 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:41.061145 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:41.061227 6505 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.730800 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:44Z is after 2025-08-24T17:21:41Z"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.808313 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.808359 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.808372 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.808396 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.808410 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.911045 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.911108 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.911125 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.911152 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:44 crc kubenswrapper[5045]: I1125 22:59:44.911171 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:44Z","lastTransitionTime":"2025-11-25T22:59:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:45 crc kubenswrapper[5045]: I1125 22:59:45.396668 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw"
Nov 25 22:59:45 crc kubenswrapper[5045]: E1125 22:59:45.396916 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24"
Nov 25 22:59:46 crc kubenswrapper[5045]: I1125 22:59:46.395874 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 22:59:46 crc kubenswrapper[5045]: I1125 22:59:46.395920 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 22:59:46 crc kubenswrapper[5045]: I1125 22:59:46.396114 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 22:59:46 crc kubenswrapper[5045]: E1125 22:59:46.396302 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 22:59:46 crc kubenswrapper[5045]: E1125 22:59:46.396446 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 22:59:46 crc kubenswrapper[5045]: E1125 22:59:46.396771 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 22:59:47 crc kubenswrapper[5045]: I1125 22:59:47.156292 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw"
Nov 25 22:59:47 crc kubenswrapper[5045]: E1125 22:59:47.156621 5045 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 22:59:47 crc kubenswrapper[5045]: E1125 22:59:47.156825 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs podName:9e044b50-b07a-44a0-b69f-45fd4392de24 nodeName:}" failed. No retries permitted until 2025-11-25 22:59:55.156784249 +0000 UTC m=+51.514443391 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs") pod "network-metrics-daemon-9rjvw" (UID: "9e044b50-b07a-44a0-b69f-45fd4392de24") : object "openshift-multus"/"metrics-daemon-secret" not registered
Has your network provider started?"} Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.196042 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.196100 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.196118 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.196144 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.196163 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:49Z","lastTransitionTime":"2025-11-25T22:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.299430 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.299498 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.299513 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.299537 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.299552 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:49Z","lastTransitionTime":"2025-11-25T22:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.396283 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:49 crc kubenswrapper[5045]: E1125 22:59:49.396589 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.403127 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.403199 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.403215 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.403243 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.403261 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:49Z","lastTransitionTime":"2025-11-25T22:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.507144 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.507234 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.507259 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.507291 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.507315 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:49Z","lastTransitionTime":"2025-11-25T22:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.610662 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.610784 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.610805 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.610831 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.610849 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:49Z","lastTransitionTime":"2025-11-25T22:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.714513 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.714581 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.714598 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.714626 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.714644 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:49Z","lastTransitionTime":"2025-11-25T22:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.817989 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.818085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.818106 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.818138 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.818159 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:49Z","lastTransitionTime":"2025-11-25T22:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.921546 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.921629 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.921653 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.921686 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:49 crc kubenswrapper[5045]: I1125 22:59:49.921709 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:49Z","lastTransitionTime":"2025-11-25T22:59:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.027259 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.027321 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.027343 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.027368 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.027386 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.130437 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.130488 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.130498 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.130514 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.130541 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.233597 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.233664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.233675 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.233695 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.233721 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.337568 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.337641 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.337663 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.337692 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.337740 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.396202 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.396308 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:50 crc kubenswrapper[5045]: E1125 22:59:50.396428 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.396471 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:50 crc kubenswrapper[5045]: E1125 22:59:50.396595 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:50 crc kubenswrapper[5045]: E1125 22:59:50.396807 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.441086 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.441149 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.441167 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.441191 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.441209 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.544975 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.545048 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.545062 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.545085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.545102 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.648031 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.648089 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.648109 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.648132 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.648149 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.751082 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.751125 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.751135 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.751151 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.751164 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.854312 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.854348 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.854359 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.854378 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.854391 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.958379 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.958557 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.958577 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.958604 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:50 crc kubenswrapper[5045]: I1125 22:59:50.958626 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:50Z","lastTransitionTime":"2025-11-25T22:59:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.062701 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.062805 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.062826 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.062853 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.062874 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.166944 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.167025 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.167045 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.167073 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.167092 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.270610 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.270655 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.270666 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.270686 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.270706 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.374041 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.374116 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.374135 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.374163 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.374182 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.396342 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:51 crc kubenswrapper[5045]: E1125 22:59:51.396518 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.396989 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.397027 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.397051 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.397082 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.397105 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: E1125 22:59:51.412806 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:51Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.417912 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.417982 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.418010 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.418038 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.418061 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: E1125 22:59:51.439354 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:51Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.445631 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.445704 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.445739 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.445774 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.445793 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: E1125 22:59:51.464802 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:51Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.475404 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.475479 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.475500 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.475559 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.475581 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: E1125 22:59:51.495412 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:51Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.501302 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.501365 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.501390 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.501449 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.501475 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: E1125 22:59:51.519644 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:51Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:51 crc kubenswrapper[5045]: E1125 22:59:51.519953 5045 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.521757 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.521813 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.521833 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.521855 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.521872 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.625788 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.625886 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.625906 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.625934 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.625952 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.729068 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.729140 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.729157 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.729182 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.729202 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.833042 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.833092 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.833112 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.833140 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.833158 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.936465 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.936558 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.936611 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.936642 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:51 crc kubenswrapper[5045]: I1125 22:59:51.936659 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:51Z","lastTransitionTime":"2025-11-25T22:59:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.040202 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.040257 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.040268 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.040289 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.040302 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.144430 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.144498 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.144515 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.144541 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.144559 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.247475 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.247542 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.247560 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.247588 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.247608 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.350948 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.351028 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.351046 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.351520 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.351553 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.396008 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:52 crc kubenswrapper[5045]: E1125 22:59:52.396183 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.396697 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:52 crc kubenswrapper[5045]: E1125 22:59:52.396831 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.396912 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:52 crc kubenswrapper[5045]: E1125 22:59:52.397007 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.454846 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.454901 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.454920 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.454953 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.454978 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.558133 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.558200 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.558217 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.558244 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.558264 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.661143 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.661211 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.661228 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.661254 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.661273 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.764767 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.764851 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.764875 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.764907 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.764930 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.868672 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.868819 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.868850 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.868884 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.868911 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.972193 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.972252 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.972269 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.972310 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:52 crc kubenswrapper[5045]: I1125 22:59:52.972329 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:52Z","lastTransitionTime":"2025-11-25T22:59:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.075793 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.075869 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.075889 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.075914 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.075934 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.178918 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.178977 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.178994 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.179022 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.179042 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.282451 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.282538 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.282558 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.282591 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.282613 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.391202 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.391268 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.391288 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.391319 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.391346 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.396444 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw"
Nov 25 22:59:53 crc kubenswrapper[5045]: E1125 22:59:53.396703 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.494348 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.494405 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.494424 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.494449 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.494468 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.597503 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.597663 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.597684 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.597708 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.597756 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.701271 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.701337 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.701355 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.701383 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.701402 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.803931 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.803983 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.803994 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.804013 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.804027 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.907205 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.907305 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.907332 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.907371 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:53 crc kubenswrapper[5045]: I1125 22:59:53.907397 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:53Z","lastTransitionTime":"2025-11-25T22:59:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.011454 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.012028 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.012254 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.012465 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.012669 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.116664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.117210 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.117376 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.117574 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.117772 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.224226 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.225497 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.225664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.225913 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.226167 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.329333 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.329409 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.329429 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.329457 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.329477 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.396661 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.396770 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.397057 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 22:59:54 crc kubenswrapper[5045]: E1125 22:59:54.397047 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 22:59:54 crc kubenswrapper[5045]: E1125 22:59:54.397293 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 22:59:54 crc kubenswrapper[5045]: E1125 22:59:54.397448 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.419389 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.432830 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.432910 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.432938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.432972 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.432997 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.441226 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.459099 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.488888 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6
dfebbc62ebc35048671ab84e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\" reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056391 6505 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056624 6505 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056903 6505 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.057003 6505 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:41.057747 6505 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:41.057848 6505 factory.go:656] Stopping watch factory\\\\nI1125 22:59:41.057876 6505 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:41.061029 6505 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 22:59:41.061058 6505 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 22:59:41.061115 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:41.061145 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:41.061227 6505 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.510229 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.526639 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.537184 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.537585 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.537631 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.537680 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.537707 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.537776 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.560865 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.578510 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.595772 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.610389 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
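
Every "Failed to update status for pod" entry above has the same root cause: the apiserver must call the pod.network-node-identity.openshift.io admission webhook before persisting the patch, and that webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-25. A minimal sketch of the NotAfter/NotBefore window check that Go's crypto/x509 verifier performs during the handshake; the PEM path is an assumption (the webhook pod mounts a webhook-cert volume under /etc/webhook-cert/):

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Assumed location of the webhook's serving certificate.
        data, err := os.ReadFile("/etc/webhook-cert/tls.crt")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            fmt.Fprintln(os.Stderr, "no PEM block found")
            os.Exit(1)
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        // The same validity-window check that yields "certificate has
        // expired or is not yet valid" in the handshake errors above.
        now := time.Now().UTC()
        switch {
        case now.After(cert.NotAfter):
            fmt.Printf("expired: current time %s is after %s\n",
                now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
        case now.Before(cert.NotBefore):
            fmt.Printf("not yet valid: current time %s is before %s\n",
                now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
        default:
            fmt.Printf("valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
        }
    }

Because one certificate gates the whole webhook, renewing it (or correcting a skewed node clock) should clear this entire class of status-patch failures at once.
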
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.628999 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.640531 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.640586 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.640605 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.640630 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.640657 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.647106 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.664976 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.687185 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\
\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o
://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mount
Path\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.715134 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\
\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:54Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.744704 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.744794 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.744813 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.744840 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.744860 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.848435 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.848509 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.848534 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.848563 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.848583 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.951918 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.952066 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.952085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.952112 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:54 crc kubenswrapper[5045]: I1125 22:59:54.952133 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:54Z","lastTransitionTime":"2025-11-25T22:59:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.055564 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.056084 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.056305 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.056505 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.056702 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.160608 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.161076 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.161373 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.161649 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.161950 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
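
The NodeNotReady heartbeats repeating every ~100 ms all carry one runtime condition: the container runtime reports NetworkReady=false because nothing has written a CNI config into /etc/kubernetes/cni/net.d/ yet (ovnkube-node is still starting, as the PLEG events further down show). A quick sketch of the directory probe; the accepted extensions are an assumption about what the runtime's CNI loader (libcni/ocicni) picks up:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        dir := "/etc/kubernetes/cni/net.d" // the directory named in the kubelet message
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Fprintln(os.Stderr, "cannot read CNI conf dir:", err)
            os.Exit(1)
        }
        var configs []string
        for _, e := range entries {
            if e.IsDir() {
                continue
            }
            switch strings.ToLower(filepath.Ext(e.Name())) {
            case ".conf", ".conflist", ".json": // assumed accepted extensions
                configs = append(configs, e.Name())
            }
        }
        if len(configs) == 0 {
            fmt.Println("no CNI configuration file found; the node will stay NotReady")
            return
        }
        fmt.Println("CNI configs:", configs)
    }
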
Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.165094 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:55 crc kubenswrapper[5045]: E1125 22:59:55.165277 5045 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:55 crc kubenswrapper[5045]: E1125 22:59:55.165352 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs podName:9e044b50-b07a-44a0-b69f-45fd4392de24 nodeName:}" failed. No retries permitted until 2025-11-25 23:00:11.165328498 +0000 UTC m=+67.522987620 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs") pod "network-metrics-daemon-9rjvw" (UID: "9e044b50-b07a-44a0-b69f-45fd4392de24") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.265660 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.265760 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.265779 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.265806 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.265825 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
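
The MountVolume.SetUp failure for metrics-certs just above is not a TLS or network problem: the kubelet's cache-based secret/configmap manager returns "object ... not registered" when a volume asks for an object before the owning pod has been (re)registered with the manager, which happens transiently across a kubelet restart. A toy model of that register-before-get contract; the type and method names here are hypothetical, not the kubelet's actual API:

    package main

    import (
        "fmt"
        "sync"
    )

    // objectCache is a toy model of the kubelet's cache-based object
    // manager: Get fails for any key that has not been Registered first,
    // producing the same "not registered" shape seen in the log.
    type objectCache struct {
        mu    sync.Mutex
        items map[string][]byte
    }

    func newObjectCache() *objectCache {
        return &objectCache{items: map[string][]byte{}}
    }

    func (c *objectCache) Register(namespace, name string, data []byte) {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.items[namespace+"/"+name] = data
    }

    func (c *objectCache) Get(namespace, name string) ([]byte, error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        data, ok := c.items[namespace+"/"+name]
        if !ok {
            return nil, fmt.Errorf("object %q/%q not registered", namespace, name)
        }
        return data, nil
    }

    func main() {
        cache := newObjectCache()
        if _, err := cache.Get("openshift-multus", "metrics-daemon-secret"); err != nil {
            fmt.Println(err) // mirrors the mount failure above
        }
        cache.Register("openshift-multus", "metrics-daemon-secret", []byte("tls material"))
        if _, err := cache.Get("openshift-multus", "metrics-daemon-secret"); err == nil {
            fmt.Println("mount can proceed once the pod is registered")
        }
    }
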
Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.370237 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.370797 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.370944 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.371093 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.371220 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.395905 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:55 crc kubenswrapper[5045]: E1125 22:59:55.396127 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.397162 5045 scope.go:117] "RemoveContainer" containerID="50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.475162 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.475239 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.475255 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.475290 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.475305 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.579217 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.580011 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.580093 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.580125 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.580151 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.683196 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.683266 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.683281 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.683307 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.683322 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.787126 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.787204 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.787222 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.787252 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.787273 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.870592 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/1.log" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.875284 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.875870 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.890929 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.890994 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.891012 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.891038 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.891056 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.909985 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:55Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.936659 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:55Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.985926 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 22:59:55 crc kubenswrapper[5045]: E1125 22:59:55.986069 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:00:27.986040418 +0000 UTC m=+84.343699550 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.986307 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:55 crc kubenswrapper[5045]: E1125 22:59:55.986464 5045 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:55 crc kubenswrapper[5045]: E1125 22:59:55.986561 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 23:00:27.986535952 +0000 UTC m=+84.344195124 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.987431 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:55 crc kubenswrapper[5045]: E1125 22:59:55.987947 5045 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:55 crc kubenswrapper[5045]: E1125 22:59:55.988009 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 23:00:27.987993753 +0000 UTC m=+84.345652975 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.995211 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.995277 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.995295 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.995318 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:55 crc kubenswrapper[5045]: I1125 22:59:55.995337 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:55Z","lastTransitionTime":"2025-11-25T22:59:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.010539 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mou
ntPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.045925 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39
b12302289cbbafe81eebbb38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\" reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056391 6505 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056624 6505 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056903 6505 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.057003 6505 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:41.057747 6505 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:41.057848 6505 factory.go:656] Stopping watch factory\\\\nI1125 22:59:41.057876 6505 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:41.061029 6505 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 22:59:41.061058 6505 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 22:59:41.061115 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:41.061145 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:41.061227 6505 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.060181 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.068370 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.078061 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.086749 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.088464 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.088541 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.088636 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.088665 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.088636 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 22:59:56 
crc kubenswrapper[5045]: E1125 22:59:56.088679 5045 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.088696 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.088728 5045 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.088745 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 23:00:28.088728914 +0000 UTC m=+84.446388026 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.088766 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 23:00:28.088754555 +0000 UTC m=+84.446413667 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.097806 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.097842 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.097851 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.097866 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.097877 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.099326 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.110634 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.122177 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.133226 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.144211 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.155219 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.170023 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97
ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.185054 5045 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 
25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.200105 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.200155 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.200163 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.200177 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.200186 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.303369 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.303414 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.303426 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.303446 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.303461 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.395834 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.396006 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.396096 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.396119 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.396298 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.396455 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.405972 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.406011 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.406019 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.406033 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.406043 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.508905 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.508981 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.509004 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.509035 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.509059 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.612515 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.612561 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.612569 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.612585 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.612596 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.715995 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.716071 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.716090 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.716120 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.716145 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.819487 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.819546 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.819561 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.819592 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.819606 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.881882 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/2.log" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.882680 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/1.log" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.885875 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38" exitCode=1 Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.885958 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.886028 5045 scope.go:117] "RemoveContainer" containerID="50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.886624 5045 scope.go:117] "RemoveContainer" containerID="5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38" Nov 25 22:59:56 crc kubenswrapper[5045]: E1125 22:59:56.886831 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.907056 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.919260 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.922073 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.922118 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.922132 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.922150 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.922163 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:56Z","lastTransitionTime":"2025-11-25T22:59:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.931934 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 
22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.947780 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.963213 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.977570 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:56 crc kubenswrapper[5045]: I1125 22:59:56.991733 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:56Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.009081 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.023436 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.024918 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.024969 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.024986 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.025012 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.025032 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.038445 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.054216 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.069528 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.084525 5045 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.117572 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50e1addf8140789f0c322c7b233f1895c69d63d6dfebbc62ebc35048671ab84e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:41Z\\\",\\\"message\\\":\\\" reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056391 6505 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056624 6505 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.056903 6505 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:41.057003 6505 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:41.057747 6505 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 22:59:41.057848 6505 factory.go:656] Stopping watch factory\\\\nI1125 22:59:41.057876 6505 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:41.061029 6505 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 22:59:41.061058 6505 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 22:59:41.061115 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:41.061145 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:41.061227 6505 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.128220 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.128257 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.128275 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.128300 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.128320 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.135855 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.152641 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.231993 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.232056 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.232074 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.232100 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.232118 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.335495 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.335550 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.335563 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.335590 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.335607 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.396316 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:57 crc kubenswrapper[5045]: E1125 22:59:57.396566 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.438809 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.438870 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.438887 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.438915 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.438933 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.541973 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.542015 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.542024 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.542040 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.542051 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.645942 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.646016 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.646040 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.646071 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.646093 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.749143 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.749254 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.749274 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.749299 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.749319 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.852338 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.852387 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.852409 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.852434 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.852453 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.892822 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/2.log" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.898869 5045 scope.go:117] "RemoveContainer" containerID="5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38" Nov 25 22:59:57 crc kubenswrapper[5045]: E1125 22:59:57.899136 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.924996 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.944584 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.956039 5045 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.956096 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.956115 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.956142 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.956164 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:57Z","lastTransitionTime":"2025-11-25T22:59:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.966835 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:57 crc kubenswrapper[5045]: I1125 22:59:57.984775 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:57Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.012430 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39
b12302289cbbafe81eebbb38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.032028 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.045883 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\
"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.056814 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.058588 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.058611 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.058619 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.058633 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.058642 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.068882 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.081577 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.095362 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.108134 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.119001 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.131173 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.145801 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.159726 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.161869 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.161907 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.161922 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.161942 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.161958 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.265367 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.265434 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.265456 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.265482 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.265500 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.368610 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.368654 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.368664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.368679 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.368690 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.396611 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.396823 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.396947 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 22:59:58 crc kubenswrapper[5045]: E1125 22:59:58.397006 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 22:59:58 crc kubenswrapper[5045]: E1125 22:59:58.396836 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 22:59:58 crc kubenswrapper[5045]: E1125 22:59:58.397785 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.472354 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.472432 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.472458 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.472491 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.472515 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.490401 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.504514 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.512940 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\
",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.533765 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506c
e0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.550014 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.572018 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.576806 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.576844 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.576856 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.576872 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.576884 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.591855 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.611234 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.629424 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.647670 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.662484 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.680693 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.680742 
5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.680754 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.680770 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.680782 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.684331 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:0
7Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.706369 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.724551 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.745566 5045 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.767084 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.783988 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.784051 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.784069 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.784097 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.784115 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.785425 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.812260 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: 
[failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T22:59:58Z is after 2025-08-24T17:21:41Z" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.887293 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.887330 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.887342 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.887359 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.887370 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.990598 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.990653 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.990671 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.990698 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:58 crc kubenswrapper[5045]: I1125 22:59:58.990756 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:58Z","lastTransitionTime":"2025-11-25T22:59:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.094326 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.094419 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.094448 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.094483 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.094508 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.197310 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.197374 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.197391 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.197415 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.197434 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.300831 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.300894 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.300913 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.300937 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.300957 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.395795 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 22:59:59 crc kubenswrapper[5045]: E1125 22:59:59.396057 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.404018 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.404078 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.404096 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.404121 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.404143 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.506657 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.506752 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.506781 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.506809 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.506827 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.610114 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.610182 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.610209 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.610240 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.610260 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.713782 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.713836 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.713852 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.713876 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.713893 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.817302 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.817355 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.817371 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.817396 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.817413 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.920539 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.920631 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.920796 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.920874 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 22:59:59 crc kubenswrapper[5045]: I1125 22:59:59.920900 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T22:59:59Z","lastTransitionTime":"2025-11-25T22:59:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.024078 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.024585 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.024783 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.024985 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.025158 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.128256 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.128321 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.128383 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.128416 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.128437 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.232209 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.232260 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.232419 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.232461 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.232478 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.335453 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.336027 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.336206 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.336395 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.336589 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.395742 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.395757 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.395761 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:00 crc kubenswrapper[5045]: E1125 23:00:00.396073 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:00 crc kubenswrapper[5045]: E1125 23:00:00.396126 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:00 crc kubenswrapper[5045]: E1125 23:00:00.395880 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.439508 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.439566 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.439588 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.439620 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.439641 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.541822 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.541900 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.541926 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.541962 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.541989 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.645153 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.645205 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.645220 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.645242 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.645257 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.749793 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.749858 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.749876 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.749902 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.749921 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.853649 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.854025 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.854104 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.854194 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.854289 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.957098 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.957166 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.957209 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.957242 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:00 crc kubenswrapper[5045]: I1125 23:00:00.957261 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:00Z","lastTransitionTime":"2025-11-25T23:00:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.059708 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.059753 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.059766 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.059783 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.059793 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.162839 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.162866 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.162874 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.162888 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.162897 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.265821 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.265903 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.265928 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.265959 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.265982 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.369902 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.369969 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.369987 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.370013 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.370034 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.395662 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:01 crc kubenswrapper[5045]: E1125 23:00:01.395938 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.472326 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.472403 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.472426 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.472464 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.472486 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.576461 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.576532 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.576550 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.576578 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.576598 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.679854 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.679928 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.679946 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.679973 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.679991 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.783552 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.783622 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.783641 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.783667 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.783685 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.887248 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.887315 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.887334 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.887361 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.887380 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.889702 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.889766 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.889785 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.889806 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.889825 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: E1125 23:00:01.908957 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:01Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.916376 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.916418 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.916434 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.916457 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.916474 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: E1125 23:00:01.944287 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:01Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.949612 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.949679 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.949698 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.949757 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.949777 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: E1125 23:00:01.970264 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:01Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.974896 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.974954 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.974972 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.974996 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.975011 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:01 crc kubenswrapper[5045]: E1125 23:00:01.993636 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:01Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.999413 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.999480 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.999508 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.999539 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:01 crc kubenswrapper[5045]: I1125 23:00:01.999561 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:01Z","lastTransitionTime":"2025-11-25T23:00:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: E1125 23:00:02.019767 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:02Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:02 crc kubenswrapper[5045]: E1125 23:00:02.020085 5045 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.022706 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
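Every attempt in the retry cycle above dies on the same check: the node.network-node-identity.openshift.io webhook's serving certificate expired on 2025-08-24T17:21:41Z, long before the node clock's 2025-11-25T23:00:02Z. A minimal Go sketch for confirming the certificate window from the node itself, assuming the webhook is still listening on 127.0.0.1:9743 as in the log; the handshake deliberately skips verification so the expired certificate can be read rather than rejected:

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
    )

    func main() {
        // Endpoint taken from the kubelet error above; adjust if the webhook moves.
        // InsecureSkipVerify is intentional: we want to inspect the expired
        // certificate's validity window, not trust it.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        cert := conn.ConnectionState().PeerCertificates[0]
        fmt.Println("subject:   ", cert.Subject)
        fmt.Println("not before:", cert.NotBefore)
        fmt.Println("not after: ", cert.NotAfter) // 2025-08-24T17:21:41Z per the log
    }

The same inspection works with openssl s_client -connect 127.0.0.1:9743 when Go is not available on the node.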
event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.022821 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.022851 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.022885 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.022910 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.125777 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.125894 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.125914 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.125939 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.125958 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.228990 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.229039 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.229055 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.229077 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.229096 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.331982 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.332059 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.332080 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.332108 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.332131 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.396637 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.396689 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:02 crc kubenswrapper[5045]: E1125 23:00:02.396901 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.396943 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:02 crc kubenswrapper[5045]: E1125 23:00:02.397110 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:02 crc kubenswrapper[5045]: E1125 23:00:02.397230 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.435490 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.435552 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.435571 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.435595 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.435615 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.539436 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.540004 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.540198 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.540364 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.540502 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.644062 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.644145 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.644168 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.644199 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.644223 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.747329 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.747389 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.747410 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.747435 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.747454 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.851815 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.852047 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.852079 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.852147 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.852175 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.955298 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.955358 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.955375 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.955401 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:02 crc kubenswrapper[5045]: I1125 23:00:02.955422 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:02Z","lastTransitionTime":"2025-11-25T23:00:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.058613 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.058670 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.058688 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.058769 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.058789 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.161627 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.161676 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.161691 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.161717 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.161757 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.264686 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.264771 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.264790 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.264821 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.264845 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.368409 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.368469 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.368485 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.368523 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.368540 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.397028 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:03 crc kubenswrapper[5045]: E1125 23:00:03.397631 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.471799 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.471856 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.471875 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.471898 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.471915 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.576285 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.576350 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.576371 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.576397 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.576417 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.679980 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.680032 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.680049 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.680074 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.680094 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.788481 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.788576 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.788603 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.788633 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.788655 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.891784 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.891868 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.891896 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.891931 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.892185 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.996010 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.996459 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.996616 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.996813 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:03 crc kubenswrapper[5045]: I1125 23:00:03.996972 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:03Z","lastTransitionTime":"2025-11-25T23:00:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.100051 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.100116 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.100139 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.100166 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.100186 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.203648 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.204070 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.204294 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.204525 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.204782 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.307805 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.307874 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.307892 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.307916 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.307939 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.396074 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:04 crc kubenswrapper[5045]: E1125 23:00:04.396274 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.396402 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:04 crc kubenswrapper[5045]: E1125 23:00:04.396617 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.396924 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:04 crc kubenswrapper[5045]: E1125 23:00:04.397228 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.411257 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.411303 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.411320 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.411347 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.411366 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.419198 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.440317 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.462002 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.485658 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.507598 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36eca948-ae19-4cad-b7d3-79835d6a261f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.513657 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.513753 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.513780 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.513811 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.513836 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.530598 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.549405 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.569409 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.589631 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.609454 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.619152 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.619216 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.619236 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.619261 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.619280 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.640154 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.660233 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.676619 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.701245 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.722153 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.722221 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.722236 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.722260 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.722275 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.750663 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.765065 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.775845 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:04Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.825400 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.825458 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.825469 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.825488 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.825500 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.928064 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.928134 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.928155 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.928182 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:04 crc kubenswrapper[5045]: I1125 23:00:04.928205 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:04Z","lastTransitionTime":"2025-11-25T23:00:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.031122 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.031190 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.031209 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.031236 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.031258 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.134244 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.134306 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.134322 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.134347 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.134366 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.241228 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.241287 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.241304 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.241329 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.241348 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.344252 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.344317 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.344334 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.344359 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.344378 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.396350 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw"
Nov 25 23:00:05 crc kubenswrapper[5045]: E1125 23:00:05.396551 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.447467 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.447510 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.447522 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.447539 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.447551 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.550226 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.550271 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.550284 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.550302 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.550313 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.653901 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.653939 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.653948 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.653967 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.653980 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.757900 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.757989 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.758001 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.758020 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.758033 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.861092 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.861165 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.861180 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.861206 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.861220 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.963907 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.963974 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.963986 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.964026 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:05 crc kubenswrapper[5045]: I1125 23:00:05.964038 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:05Z","lastTransitionTime":"2025-11-25T23:00:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.066654 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.066738 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.066751 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.066772 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.066787 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:06Z","lastTransitionTime":"2025-11-25T23:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.171300 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.171351 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.171365 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.171386 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.171400 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:06Z","lastTransitionTime":"2025-11-25T23:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.274551 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.274594 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.274603 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.274621 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.274632 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:06Z","lastTransitionTime":"2025-11-25T23:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.377425 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.377483 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.377499 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.377524 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.377542 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:06Z","lastTransitionTime":"2025-11-25T23:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.395825 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.395824 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.395854 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 23:00:06 crc kubenswrapper[5045]: E1125 23:00:06.396039 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 23:00:06 crc kubenswrapper[5045]: E1125 23:00:06.396099 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 23:00:06 crc kubenswrapper[5045]: E1125 23:00:06.396423 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.480485 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.480547 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.480564 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.480590 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.480610 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:06Z","lastTransitionTime":"2025-11-25T23:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.583475 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.583549 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.583568 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.583597 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:06 crc kubenswrapper[5045]: I1125 23:00:06.583625 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:06Z","lastTransitionTime":"2025-11-25T23:00:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 23:00:07 crc kubenswrapper[5045]: I1125 23:00:07.395617 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw"
Nov 25 23:00:07 crc kubenswrapper[5045]: E1125 23:00:07.395882 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24"
Nov 25 23:00:08 crc kubenswrapper[5045]: I1125 23:00:08.395652 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 23:00:08 crc kubenswrapper[5045]: I1125 23:00:08.395672 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 23:00:08 crc kubenswrapper[5045]: I1125 23:00:08.395772 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 23:00:08 crc kubenswrapper[5045]: E1125 23:00:08.396005 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 23:00:08 crc kubenswrapper[5045]: E1125 23:00:08.396204 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 23:00:08 crc kubenswrapper[5045]: E1125 23:00:08.396315 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 23:00:09 crc kubenswrapper[5045]: I1125 23:00:09.396350 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw"
Nov 25 23:00:09 crc kubenswrapper[5045]: E1125 23:00:09.396549 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24"
Nov 25 23:00:09 crc kubenswrapper[5045]: I1125 23:00:09.397306 5045 scope.go:117] "RemoveContainer" containerID="5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38"
Nov 25 23:00:09 crc kubenswrapper[5045]: E1125 23:00:09.397604 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc"
Nov 25 23:00:10 crc kubenswrapper[5045]: I1125 23:00:10.396017 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 23:00:10 crc kubenswrapper[5045]: I1125 23:00:10.396061 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 23:00:10 crc kubenswrapper[5045]: E1125 23:00:10.396218 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 23:00:10 crc kubenswrapper[5045]: I1125 23:00:10.396260 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 23:00:10 crc kubenswrapper[5045]: E1125 23:00:10.396409 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 23:00:10 crc kubenswrapper[5045]: E1125 23:00:10.396483 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.154486 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.154527 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.154541 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.154558 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.154570 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.257138 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.257191 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.257207 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.257232 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.257250 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.259664 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:11 crc kubenswrapper[5045]: E1125 23:00:11.259843 5045 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 23:00:11 crc kubenswrapper[5045]: E1125 23:00:11.259938 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs podName:9e044b50-b07a-44a0-b69f-45fd4392de24 nodeName:}" failed. No retries permitted until 2025-11-25 23:00:43.259908138 +0000 UTC m=+99.617567290 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs") pod "network-metrics-daemon-9rjvw" (UID: "9e044b50-b07a-44a0-b69f-45fd4392de24") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.359976 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.360039 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.360052 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.360074 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.360106 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.396649 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:11 crc kubenswrapper[5045]: E1125 23:00:11.396866 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.463082 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.463158 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.463183 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.463214 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.463234 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.565940 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.565982 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.565993 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.566013 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.566024 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.668683 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.668745 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.668758 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.668775 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.668786 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.771626 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.771694 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.771746 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.771771 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.771790 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.874866 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.874919 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.874932 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.874953 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.874972 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.977925 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.977983 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.977995 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.978021 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:11 crc kubenswrapper[5045]: I1125 23:00:11.978036 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:11Z","lastTransitionTime":"2025-11-25T23:00:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.080229 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.080277 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.080286 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.080307 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.080320 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.183691 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.183768 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.183787 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.183813 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.183833 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.286874 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.286934 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.287000 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.287049 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.287069 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.297419 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.297474 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.297487 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.297514 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.297532 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.311053 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:12Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.315976 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.316016 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.316033 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.316056 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.316074 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.329601 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:12Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.333802 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.333857 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.333870 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.333900 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.333916 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.350903 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:12Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.355096 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.355147 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.355161 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.355180 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.355198 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.367993 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:12Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.373664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.373742 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.373756 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.373779 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.373793 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.391514 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:12Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.391691 5045 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.393568 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.393619 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.393631 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.393648 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.393660 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.396023 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.396130 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.396211 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.396288 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.396425 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:12 crc kubenswrapper[5045]: E1125 23:00:12.396579 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.496868 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.496909 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.496921 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.496938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.496954 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.600204 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.600264 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.600280 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.600300 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.600313 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.703388 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.703443 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.703456 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.703476 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.703489 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.806072 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.806114 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.806123 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.806140 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.806155 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.908997 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.909053 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.909066 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.909085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:12 crc kubenswrapper[5045]: I1125 23:00:12.909097 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:12Z","lastTransitionTime":"2025-11-25T23:00:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.011779 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.011847 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.011867 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.011895 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.011914 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.114876 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.115281 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.115382 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.115505 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.115605 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.218040 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.218096 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.218112 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.218136 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.218152 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.321415 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.321470 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.321485 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.321507 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.321525 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.396301 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:13 crc kubenswrapper[5045]: E1125 23:00:13.396551 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.424451 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.424553 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.424576 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.424610 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.424630 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.527741 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.527827 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.527849 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.527878 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.527900 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.630799 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.630880 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.630898 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.630927 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.630950 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.734434 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.734486 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.734497 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.734517 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.734530 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.837815 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.837871 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.837883 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.837921 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.837932 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.940175 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.940236 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.940253 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.940281 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:13 crc kubenswrapper[5045]: I1125 23:00:13.940300 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:13Z","lastTransitionTime":"2025-11-25T23:00:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.043643 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.043745 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.043762 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.043783 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.043796 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.146261 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.146317 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.146332 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.146353 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.146365 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.248861 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.248918 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.248930 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.248952 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.248970 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.351940 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.352005 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.352022 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.352048 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.352066 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.396264 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.396360 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.396423 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:14 crc kubenswrapper[5045]: E1125 23:00:14.397767 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:14 crc kubenswrapper[5045]: E1125 23:00:14.398005 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:14 crc kubenswrapper[5045]: E1125 23:00:14.398104 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.409565 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.425276 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.443085 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.455310 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.455360 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.455378 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.455405 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.455423 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.457474 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36eca948-ae19-4cad-b7d3-79835d6a261f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.470401 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.487025 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.500832 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.514423 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.529923 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.543384 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.557818 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.557855 
5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.557865 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.557881 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.557892 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.568518 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:0
7Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.586314 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.606135 5045 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.625593 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.639076 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.660057 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.660085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.660093 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.660107 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.660117 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.661619 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.679662 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: 
[failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:14Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.763939 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.763980 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.763989 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.764004 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.764016 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.867007 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.867075 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.867093 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.867135 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.867154 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.969535 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.969607 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.969628 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.969655 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:14 crc kubenswrapper[5045]: I1125 23:00:14.969682 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:14Z","lastTransitionTime":"2025-11-25T23:00:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.072429 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.072504 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.072525 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.072558 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.072576 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.176100 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.176170 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.176188 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.176218 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.176241 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.278869 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.278916 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.278927 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.278944 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.278957 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.381430 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.381478 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.381512 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.381532 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.381544 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.396264 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:15 crc kubenswrapper[5045]: E1125 23:00:15.396463 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.484020 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.484085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.484103 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.484131 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.484154 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.587744 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.587808 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.587827 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.587852 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.587870 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.691125 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.691202 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.691224 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.691253 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.691277 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.795350 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.795460 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.795535 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.795569 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.795752 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.899350 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.899407 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.899420 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.899442 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.899455 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:15Z","lastTransitionTime":"2025-11-25T23:00:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.960360 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/0.log" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.960416 5045 generic.go:334] "Generic (PLEG): container finished" podID="e971a47d-97d5-4a21-a255-2497b2b3cbbc" containerID="69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5" exitCode=1 Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.960450 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-ht6dm" event={"ID":"e971a47d-97d5-4a21-a255-2497b2b3cbbc","Type":"ContainerDied","Data":"69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5"} Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.960863 5045 scope.go:117] "RemoveContainer" containerID="69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.980873 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/en
v\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:15Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:15 crc kubenswrapper[5045]: I1125 23:00:15.999126 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:15Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.005444 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.005475 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.005486 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.005500 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.005510 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.013499 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T23:00:15Z\\\",\\\"message\\\":\\\"2025-11-25T22:59:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702\\\\n2025-11-25T22:59:29+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702 to /host/opt/cni/bin/\\\\n2025-11-25T22:59:30Z [verbose] multus-daemon started\\\\n2025-11-25T22:59:30Z [verbose] Readiness Indicator file check\\\\n2025-11-25T23:00:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.043922 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.063051 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.078454 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.098189 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.108318 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.108381 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.108401 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.108436 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.108457 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.113108 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.123686 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.142842 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.155517 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36eca948-ae19-4cad-b7d3-79835d6a261f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.173296 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 
2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.185029 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.197568 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.210917 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.212112 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.212148 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.212164 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.212185 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.212200 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.231171 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.246996 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.314761 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.314803 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.314812 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.314830 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.314840 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.396692 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.396761 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.396828 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:16 crc kubenswrapper[5045]: E1125 23:00:16.396867 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:16 crc kubenswrapper[5045]: E1125 23:00:16.397034 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:16 crc kubenswrapper[5045]: E1125 23:00:16.397160 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.417072 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.417120 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.417132 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.417147 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.417161 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.519230 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.519301 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.519318 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.519348 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.519368 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.623038 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.623081 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.623091 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.623106 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.623116 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.726135 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.726201 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.726219 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.726243 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.726260 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.828907 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.828959 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.828971 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.828985 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.828997 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.931087 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.931117 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.931125 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.931139 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.931152 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:16Z","lastTransitionTime":"2025-11-25T23:00:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.966541 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/0.log" Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.966611 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-ht6dm" event={"ID":"e971a47d-97d5-4a21-a255-2497b2b3cbbc","Type":"ContainerStarted","Data":"bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3"} Nov 25 23:00:16 crc kubenswrapper[5045]: I1125 23:00:16.985379 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:16Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.005069 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.021392 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T23:00:15Z\\\",\\\"message\\\":\\\"2025-11-25T22:59:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702\\\\n2025-11-25T22:59:29+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702 to /host/opt/cni/bin/\\\\n2025-11-25T22:59:30Z [verbose] multus-daemon started\\\\n2025-11-25T22:59:30Z [verbose] Readiness Indicator file check\\\\n2025-11-25T23:00:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T23:00:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.033238 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.033283 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.033299 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.033323 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.033337 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.042472 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.060234 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.070523 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.085304 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.099411 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.114505 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.128340 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.135597 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.135623 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.135637 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.135656 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.135667 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.143623 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36eca948-ae19-4cad-b7d3-79835d6a261f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.162795 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.179109 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.194685 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.213294 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.231473 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.243901 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.243972 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.243991 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.244020 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.244038 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.248233 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:17Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.346856 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.346907 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.346916 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.346945 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.346960 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.395565 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:17 crc kubenswrapper[5045]: E1125 23:00:17.395702 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.449508 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.449562 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.449572 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.449590 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.449602 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.553192 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.553246 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.553262 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.553282 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.553296 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.656329 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.656376 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.656396 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.656421 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.656438 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.759184 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.759246 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.759265 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.759327 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.759346 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.861451 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.861495 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.861505 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.861523 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.861536 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.964293 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.964348 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.964359 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.964376 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:17 crc kubenswrapper[5045]: I1125 23:00:17.964388 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:17Z","lastTransitionTime":"2025-11-25T23:00:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.067317 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.067395 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.067410 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.067434 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.067451 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.170363 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.170426 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.170444 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.170471 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.170490 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.273647 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.273750 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.273775 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.273803 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.273827 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.377478 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.377556 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.377574 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.377602 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.377655 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.396099 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.396186 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:18 crc kubenswrapper[5045]: E1125 23:00:18.396334 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.396361 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:18 crc kubenswrapper[5045]: E1125 23:00:18.396507 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:18 crc kubenswrapper[5045]: E1125 23:00:18.396690 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.480433 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.480499 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.480516 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.480543 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.480565 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.584619 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.584703 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.584751 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.584784 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.584809 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.701211 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.701257 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.701270 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.701287 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.701299 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.805256 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.805309 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.805326 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.805348 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.805364 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.908932 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.908998 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.909016 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.909042 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:18 crc kubenswrapper[5045]: I1125 23:00:18.909062 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:18Z","lastTransitionTime":"2025-11-25T23:00:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.012507 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.012570 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.012583 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.012606 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.012622 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.115970 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.116042 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.116059 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.116085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.116107 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.219747 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.219822 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.219847 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.219878 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.219899 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.322440 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.322514 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.322532 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.322558 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.322578 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.396664 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:19 crc kubenswrapper[5045]: E1125 23:00:19.396962 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.426899 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.426966 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.426985 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.427014 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.427052 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.530676 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.530778 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.530800 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.530831 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.530852 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.634147 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.634212 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.634233 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.634267 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.634289 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.738158 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.738227 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.738245 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.738280 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.738300 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.846743 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.846803 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.846817 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.846839 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.846855 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.952828 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.952947 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.952967 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.952996 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:19 crc kubenswrapper[5045]: I1125 23:00:19.953025 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:19Z","lastTransitionTime":"2025-11-25T23:00:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.057217 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.057277 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.057295 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.057322 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.057341 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.161497 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.161588 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.161606 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.161636 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.161655 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.265252 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.265321 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.265337 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.265361 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.265377 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.368933 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.368997 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.369017 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.369043 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.369061 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.395587 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.395743 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.395814 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:20 crc kubenswrapper[5045]: E1125 23:00:20.395904 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:20 crc kubenswrapper[5045]: E1125 23:00:20.396010 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:20 crc kubenswrapper[5045]: E1125 23:00:20.396141 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.472707 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.472787 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.472838 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.472870 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.472888 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.575798 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.575859 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.575876 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.575901 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.575919 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.679663 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.679757 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.679776 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.679802 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.679823 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.783271 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.783340 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.783357 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.783383 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.783400 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.886799 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.886883 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.886938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.886969 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.886991 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.990489 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.990548 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.990563 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.990587 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:20 crc kubenswrapper[5045]: I1125 23:00:20.990605 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:20Z","lastTransitionTime":"2025-11-25T23:00:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.094180 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.094255 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.094273 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.094299 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.094318 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.197248 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.197350 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.197370 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.197397 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.197416 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.300866 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.300938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.300959 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.300992 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.301015 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.396671 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:21 crc kubenswrapper[5045]: E1125 23:00:21.397003 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.405525 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.405593 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.405616 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.405644 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.405662 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.414385 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.508552 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.508607 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.508624 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.508646 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.508660 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.612632 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.612676 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.612693 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.612731 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.612749 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.716775 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.716840 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.716861 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.716893 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.716914 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.820676 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.820814 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.820843 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.820877 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.820903 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.923475 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.923539 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.923560 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.923590 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:21 crc kubenswrapper[5045]: I1125 23:00:21.923612 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:21Z","lastTransitionTime":"2025-11-25T23:00:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.027835 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.027955 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.027968 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.027990 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.028007 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.131536 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.131610 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.131628 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.131658 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.131678 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.235173 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.235233 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.235251 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.235275 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.235293 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.339106 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.339149 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.339169 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.339193 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.339214 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.395690 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.395753 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.395755 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.396083 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.396315 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.396559 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.427020 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.427082 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.427101 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.427126 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.427144 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.448228 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:22Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.455462 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.455517 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.455533 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.455559 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.455580 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.474983 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:22Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.479918 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.479983 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.480005 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.480034 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.480055 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.500853 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:22Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.505985 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.506039 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.506057 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.506083 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.506102 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.527346 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:22Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.533022 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.533085 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.533106 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.533134 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.533151 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.553619 5045 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3b8b2817-c700-40bf-9467-8031fbd1fc68\\\",\\\"systemUUID\\\":\\\"f06a8c5f-301f-4137-af20-68ca464a7a49\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:22Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:22 crc kubenswrapper[5045]: E1125 23:00:22.553858 5045 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.556167 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.556205 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.556223 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.556247 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.556266 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.658936 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.658983 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.658999 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.659022 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.659040 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.762667 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.762771 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.762798 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.762828 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.762851 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.866007 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.866066 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.866082 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.866109 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.866123 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.969668 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.969733 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.969749 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.969770 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:22 crc kubenswrapper[5045]: I1125 23:00:22.969783 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:22Z","lastTransitionTime":"2025-11-25T23:00:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.072266 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.072301 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.072311 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.072327 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.072346 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.176630 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.176670 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.176685 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.176733 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.176747 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.280460 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.280523 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.280540 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.280567 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.280585 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.384276 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.384327 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.384340 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.384360 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.384373 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.395956 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:23 crc kubenswrapper[5045]: E1125 23:00:23.396177 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.397645 5045 scope.go:117] "RemoveContainer" containerID="5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.493943 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.494041 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.494058 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.494115 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.494134 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.597396 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.597439 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.597454 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.597476 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.597494 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.700799 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.700905 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.700922 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.700948 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.700966 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.804295 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.804349 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.804360 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.804379 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.804395 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.906512 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.906571 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.906585 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.906604 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.906616 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:23Z","lastTransitionTime":"2025-11-25T23:00:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:23 crc kubenswrapper[5045]: I1125 23:00:23.996680 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/2.log" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.001414 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.003324 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.009278 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.009329 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.009349 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.009375 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.009394 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.026671 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.044704 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.061096 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.091003 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.106985 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.111444 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.111491 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.111502 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.111522 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.111537 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.120822 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.138637 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminate
d\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.153109 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36eca948-ae19-4cad-b7d3-79835d6a261f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\
"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.177119 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.191968 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.207659 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.213610 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.213648 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.213661 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.213680 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.213700 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.230951 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.247359 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.275233 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa
556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T23:00:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.292258 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af59329-334f-45de-a7f3-d5f39216fb96\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://237e4ca3eedcb7251aa8ae826c8d588c0dadd94658c90aa10b9c333480f90707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2daef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2daef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.310933 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.316494 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.316547 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.316565 5045 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.316587 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.316604 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.325673 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.338798 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T23:00:15Z\\\",\\\"message\\\":\\\"2025-11-25T22:59:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702\\\\n2025-11-25T22:59:29+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702 to /host/opt/cni/bin/\\\\n2025-11-25T22:59:30Z [verbose] multus-daemon started\\\\n2025-11-25T22:59:30Z [verbose] Readiness Indicator file check\\\\n2025-11-25T23:00:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T23:00:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.396398 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.396481 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:24 crc kubenswrapper[5045]: E1125 23:00:24.396540 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:24 crc kubenswrapper[5045]: E1125 23:00:24.396635 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.396838 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:24 crc kubenswrapper[5045]: E1125 23:00:24.397007 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.410555 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.418787 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.418841 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.418861 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.418887 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.418906 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.429011 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T23:00:15Z\\\",\\\"message\\\":\\\"2025-11-25T22:59:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702\\\\n2025-11-25T22:59:29+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702 to /host/opt/cni/bin/\\\\n2025-11-25T22:59:30Z [verbose] multus-daemon started\\\\n2025-11-25T22:59:30Z [verbose] Readiness Indicator file check\\\\n2025-11-25T23:00:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T23:00:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.454616 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T23:00:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.470380 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af59329-334f-45de-a7f3-d5f39216fb96\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://237e4ca3eedcb7251aa8ae826c8d588c0dadd94658c90aa10b9c333480f90707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2daef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2daef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.487793 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.504631 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.521462 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.526488 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.526541 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.526564 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.526634 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.527539 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.540347 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.556556 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.573378 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.594682 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.609437 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.624613 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.630162 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.630208 
5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.630222 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.630244 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.630259 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.647054 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:0
7Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.665358 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36eca948-ae19-4cad-b7d3-79835d6a261f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.682393 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 
2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.706176 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\
\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.724815 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.732987 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.733055 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.733075 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.733108 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.733126 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.836299 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.836362 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.836381 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.836407 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.836429 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.939190 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.939257 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.939281 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.939309 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:24 crc kubenswrapper[5045]: I1125 23:00:24.939333 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:24Z","lastTransitionTime":"2025-11-25T23:00:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.008781 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/3.log" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.009778 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/2.log" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.014444 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6" exitCode=1 Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.014508 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.014582 5045 scope.go:117] "RemoveContainer" containerID="5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.016415 5045 scope.go:117] "RemoveContainer" containerID="167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6" Nov 25 23:00:25 crc kubenswrapper[5045]: E1125 23:00:25.017351 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.042446 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.042497 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.042513 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.042538 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.042557 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.044827 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.063970 5045 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.091288 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa
556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cdae25ffeef0b8676d222e086dab1f74c224d39b12302289cbbafe81eebbb38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T22:59:56Z\\\",\\\"message\\\":\\\"a1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 22:59:56.430842 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 22:59:56.430868 6696 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.430884 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 22:59:56.430911 6696 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 22:59:56.430909 6696 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 22:59:56.431149 6696 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 22:59:56.431610 6696 factory.go:656] Stopping watch factory\\\\nI1125 22:59:56.431647 6696 ovnkube.go:599] Stopped ovnkube\\\\nI1125 22:59:56.431695 6696 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 22:59:56.431832 6696 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:55Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T23:00:24Z\\\",\\\"message\\\":\\\"image-registry/node-ca-xmmf5\\\\nI1125 23:00:24.352408 7044 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-storage-version-migrator-operator/metrics\\\\\\\"}\\\\nI1125 23:00:24.352416 7044 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c in node crc\\\\nF1125 23:00:24.352420 7044 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z]\\\\nI1125 23:00:24.352425 7044 services_controller.go:360] Finished syncing service metrics on namespace 
openshift-kube-storage-version-mig\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T23:00:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.103949 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af59329-334f-45de-a7f3-d5f39216fb96\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://237e4ca3eedcb7251aa8ae826c8d588c0dadd94658c90aa10b9c333480f90707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2d
aef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2daef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.121543 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.137357 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.145432 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.145491 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.145512 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.145542 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.145564 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.157168 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T23:00:15Z\\\",\\\"message\\\":\\\"2025-11-25T22:59:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702\\\\n2025-11-25T22:59:29+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702 to /host/opt/cni/bin/\\\\n2025-11-25T22:59:30Z [verbose] multus-daemon started\\\\n2025-11-25T22:59:30Z [verbose] Readiness Indicator file check\\\\n2025-11-25T23:00:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T23:00:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.176404 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.193533 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.216057 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.238219 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.249287 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.249342 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.249364 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.249396 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.249418 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.253908 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.269016 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.287229 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.304892 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36eca948-ae19-4cad-b7d3-79835d6a261f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.325680 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.344813 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.352643 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.352695 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.352773 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.352800 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.352818 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.362043 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:25Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.396525 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:25 crc kubenswrapper[5045]: E1125 23:00:25.396704 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.455779 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.455827 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.455839 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.455858 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.455871 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.559297 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.559351 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.559367 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.559388 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.559402 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.662410 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.662480 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.662499 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.662526 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.662545 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.765817 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.765878 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.765898 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.765925 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.765946 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.868316 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.868391 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.868401 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.868418 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.868428 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.972090 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.972151 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.972177 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.972206 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:25 crc kubenswrapper[5045]: I1125 23:00:25.972223 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:25Z","lastTransitionTime":"2025-11-25T23:00:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.020074 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/3.log" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.025274 5045 scope.go:117] "RemoveContainer" containerID="167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6" Nov 25 23:00:26 crc kubenswrapper[5045]: E1125 23:00:26.025532 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.042224 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82a2c36c1ca46c8f29c165cfd592a0811598771543f53ba5ddbccaaff0eb957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.061033 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.075421 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.075497 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.075524 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.075558 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.075582 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.082116 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.097527 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lbrq8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a9143958-692e-41d3-970d-ffdd160f8524\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cf267cf4446f4a7e2635a2b54678d8cc6e0c6a04ed0679e3e6754a17b7a393b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zj4hx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lbrq8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.111814 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e044b50-b07a-44a0-b69f-45fd4392de24\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr8t2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9rjvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.127160 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75d95a7a-3880-4be0-905e-86eace3106e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T22:59:18Z\\\",\\\"message\\\":\\\"W1125 22:59:07.706393 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 22:59:07.706791 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764111547 cert, and key in /tmp/serving-cert-60486912/serving-signer.crt, /tmp/serving-cert-60486912/serving-signer.key\\\\nI1125 22:59:07.853268 1 observer_polling.go:159] Starting file observer\\\\nW1125 22:59:07.857138 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 22:59:07.857501 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 22:59:07.858659 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-60486912/tls.crt::/tmp/serving-cert-60486912/tls.key\\\\\\\"\\\\nF1125 22:59:18.069123 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.139566 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36eca948-ae19-4cad-b7d3-79835d6a261f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://163631d8567e2cd04119578ef686e51759ec95295743893d37b2f80701c11a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b74ba0f23b2ba55eee278b2de153692a78291fa66786fa6840a1bb36f7a3c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acb41a9b68b2aa00bbba92b627f8d231b0469068b708fb1e6f250f3c14e363c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://57d85c9d70e22c55160692c2b290ede2224de3fdd2af0379d9bbcd8b30ed271b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.154421 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43b009d4d7e3fd7cbbc5133fe1c45baa84555f42636a3f2330512671b36af2a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.170764 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07acb2e0-1638-4174-8f01-b08385fca2dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c8af45f09b93ec3d612a59fff6071ee17c38529605be707af69d13995f33ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://946f8d8c23e9a8fc1e1b9b9affa38939238ee95f78b4586f748cb7e0579ce7cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e694a2168ba19a20444831d7669cf553c5712be782a1dae97ec2a258849b479\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a540b675857f3ed4e2b42f797fecd6852cabf716fc434e3bf8cbdef0f2f4de58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e09c6112959ed92c9f52cc0a404d15b743e01a826fefe82367b7d0c180ceecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://152e232576265633b65911541993dfa314cd63816a39a0141f433c93fdec13dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfdf3ef107a42f48d711f4e5577db555fa2b4e6ba5374fec7a09a9d4379c19f0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7dwhw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pqpcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.178530 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.178591 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:26 crc 
kubenswrapper[5045]: I1125 23:00:26.178610 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.178635 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.178653 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.187674 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc394db7-8b38-4abe-841d-83a3ea3d07b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54937f21fd346cbca00dc71c1d00244416194b9f2fc45da2e1b1f003d3947cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ntr5v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7dpm4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.205649 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.225461 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-ht6dm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e971a47d-97d5-4a21-a255-2497b2b3cbbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T23:00:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T23:00:15Z\\\",\\\"message\\\":\\\"2025-11-25T22:59:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702\\\\n2025-11-25T22:59:29+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_31a85b6f-b119-4b3d-be49-18cdcc8d3702 to /host/opt/cni/bin/\\\\n2025-11-25T22:59:30Z [verbose] multus-daemon started\\\\n2025-11-25T22:59:30Z [verbose] Readiness Indicator file check\\\\n2025-11-25T23:00:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T23:00:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jjrv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-ht6dm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.244218 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T23:00:24Z\\\",\\\"message\\\":\\\"image-registry/node-ca-xmmf5\\\\nI1125 23:00:24.352408 7044 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-storage-version-migrator-operator/metrics\\\\\\\"}\\\\nI1125 23:00:24.352416 7044 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c in node crc\\\\nF1125 23:00:24.352420 7044 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:24Z is after 2025-08-24T17:21:41Z]\\\\nI1125 23:00:24.352425 7044 services_controller.go:360] Finished syncing service metrics on namespace openshift-kube-storage-version-mig\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T23:00:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8lg5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mrsr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.254272 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af59329-334f-45de-a7f3-d5f39216fb96\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://237e4ca3eedcb7251aa8ae826c8d588c0dadd94658c90aa10b9c333480f90707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2257716cada94ae9f8060f96c64abd6e7dd2807584e46b
17cad31d804da2daef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2257716cada94ae9f8060f96c64abd6e7dd2807584e46b17cad31d804da2daef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T22:59:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.270675 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f55b1be7138691a52b6152c9de2b8e3bba0b87af191e64dbfc7c465b41ee3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1352e2aa4b62ab3500fddd303cb318625b77cd35493bcf253793afa7ab666447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.281605 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.281656 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.281665 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.281682 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.281696 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.286541 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63543d46-bda7-4089-9991-aeb9f1262bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d0007f560f533e183ec6d07ecbbfb6a6c664e43c86ebcf06939a05cee336cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f664b70815a423b6eadfa40e4e0008a11ebef72189f0635e76de04d03700e5d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1fcf4467974a0da855aca3d062ad46e0acf170a91f666a966cf9c98231a632a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.302806 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xmmf5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1daf0409-a84d-47cc-9a90-7283ae93fced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://919be4add433af47b588be507b30208340ce04785b1dceb193604dbb253e0049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-99l7t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xmmf5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.314798 5045 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8fd492c-b76d-46cd-a320-eff22476cb6e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T22:59:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://804844cbb5c83dd672d509fa84b24623edccd2d4a8371777a3cd1242458b2455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa353669d8909b162edcf8d25149ed8cc87138d07b81631c4345d25966d81abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T22:59:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7fzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T22:59:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6rq7c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T23:00:26Z is after 2025-08-24T17:21:41Z" Nov 25 
23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.384833 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.384901 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.384921 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.384952 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.384973 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.396221 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.396272 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.396286 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 23:00:26 crc kubenswrapper[5045]: E1125 23:00:26.396403 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 23:00:26 crc kubenswrapper[5045]: E1125 23:00:26.396539 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 23:00:26 crc kubenswrapper[5045]: E1125 23:00:26.396681 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.487730 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.487777 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.487787 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.487802 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.487811 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.591303 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.591374 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.591394 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.591422 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.591443 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.693778 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.693836 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.693853 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.693880 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.693899 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.797011 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.797061 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.797075 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.797095 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.797109 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.899828 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.899897 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.899909 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.899932 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:26 crc kubenswrapper[5045]: I1125 23:00:26.899949 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:26Z","lastTransitionTime":"2025-11-25T23:00:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.003562 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.003642 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.003660 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.003689 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.003750 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.107182 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.107252 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.107277 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.107308 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.107330 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.210903 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.210959 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.210977 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.211002 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.211020 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.314465 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.314534 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.314553 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.314581 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.314598 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.396650 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw"
Nov 25 23:00:27 crc kubenswrapper[5045]: E1125 23:00:27.396925 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.416770 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.416832 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.416850 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.416877 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.416895 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.519586 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.519686 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.519760 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.519802 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.519821 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.623365 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.623430 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.623447 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.623473 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.623492 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.726350 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.726414 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.726434 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.726458 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.726478 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.830214 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.830285 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.830309 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.830340 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.830362 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.934328 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.934379 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.934390 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.934410 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:27 crc kubenswrapper[5045]: I1125 23:00:27.934426 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:27Z","lastTransitionTime":"2025-11-25T23:00:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.036662 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.036757 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.036775 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.036800 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.036820 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.074675 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.074946 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:32.074913495 +0000 UTC m=+148.432572647 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.075037 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.075089 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.075220 5045 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.075246 5045 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.075289 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:32.075274025 +0000 UTC m=+148.432933177 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.075319 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:32.075300276 +0000 UTC m=+148.432959428 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.140213 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.140248 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.140258 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.140273 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.140285 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.176492 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.176692 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.176745 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.176761 5045 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.176811 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:32.17679433 +0000 UTC m=+148.534453452 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.176703 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.176951 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.176990 5045 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.177022 5045 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.177157 5045 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:32.17712353 +0000 UTC m=+148.534782682 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.243894 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.243962 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.243986 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.244012 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.244030 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.347571 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.347670 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.347690 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.347753 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.347775 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.396098 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.396112 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.396278 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.396833 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.396951 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:28 crc kubenswrapper[5045]: E1125 23:00:28.397035 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.450401 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.450460 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.450478 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.450510 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.450527 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.553882 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.553933 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.553943 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.553964 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.553976 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.657332 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.657382 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.657400 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.657422 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.657864 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.765028 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.765092 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.765112 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.765191 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.765222 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.868389 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.868456 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.868477 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.868502 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.868521 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.972880 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.972938 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.972956 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.972983 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:28 crc kubenswrapper[5045]: I1125 23:00:28.973002 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:28Z","lastTransitionTime":"2025-11-25T23:00:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.075606 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.076145 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.076338 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.076525 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.076766 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.180375 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.180423 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.180441 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.180468 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.180486 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.284584 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.285087 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.285315 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.285511 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.285694 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.389554 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.389912 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.390108 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.390269 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.390441 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.396117 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:29 crc kubenswrapper[5045]: E1125 23:00:29.396573 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.493169 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.494124 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.494284 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.494426 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.494547 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.597774 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.597838 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.597856 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.597880 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.597901 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.701267 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.701330 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.701353 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.701383 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.701406 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.805201 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.805256 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.805272 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.805295 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.805313 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.908260 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.908325 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.908348 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.908373 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:29 crc kubenswrapper[5045]: I1125 23:00:29.908393 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:29Z","lastTransitionTime":"2025-11-25T23:00:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.011429 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.011483 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.011500 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.011525 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.011546 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.114675 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.114787 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.114831 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.114857 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.114874 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.218552 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.218613 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.218632 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.218654 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.218672 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.321846 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.321891 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.321911 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.321933 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.321950 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.396138 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.396194 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.396204 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:30 crc kubenswrapper[5045]: E1125 23:00:30.397038 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:30 crc kubenswrapper[5045]: E1125 23:00:30.397360 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:30 crc kubenswrapper[5045]: E1125 23:00:30.397581 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.424164 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.424215 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.424232 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.424256 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.424278 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.527468 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.527538 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.527556 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.527584 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.527604 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.631571 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.631649 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.631674 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.631704 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.631746 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.735066 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.735135 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.735159 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.735189 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.735209 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.838833 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.838934 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.838960 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.839184 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.839208 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.942070 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.942138 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.942163 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.942193 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:30 crc kubenswrapper[5045]: I1125 23:00:30.942216 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:30Z","lastTransitionTime":"2025-11-25T23:00:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.046389 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.046456 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.046479 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.046510 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.046535 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.156442 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.156851 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.156875 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.156904 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.157138 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.260427 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.260498 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.260523 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.260552 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.260577 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.363755 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.363840 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.363861 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.363894 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.363913 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.396303 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:31 crc kubenswrapper[5045]: E1125 23:00:31.396706 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.468126 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.468195 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.468222 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.468249 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.468273 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.571404 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.572227 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.572425 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.572631 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.572896 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.675595 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.675653 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.675670 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.675693 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.675709 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.779956 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.780007 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.780023 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.780047 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.780064 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.883593 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.883671 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.883690 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.883744 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.883784 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.987664 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.987773 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.987797 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.987827 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:31 crc kubenswrapper[5045]: I1125 23:00:31.987845 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:31Z","lastTransitionTime":"2025-11-25T23:00:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.091336 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.091428 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.091458 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.091488 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.091510 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.194868 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.194919 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.194931 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.194956 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.194970 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.298419 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.298513 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.298532 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.298559 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.298578 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.396349 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.396677 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.396840 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:32 crc kubenswrapper[5045]: E1125 23:00:32.397127 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:32 crc kubenswrapper[5045]: E1125 23:00:32.397949 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:32 crc kubenswrapper[5045]: E1125 23:00:32.398198 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.401746 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.401819 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.401838 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.401869 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.401887 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.505197 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.505268 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.505286 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.505312 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.505331 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.609147 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.609263 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.609284 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.609319 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.609343 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.713096 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.713189 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.713213 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.713247 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.713271 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.821615 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.821698 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.821751 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.821782 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.821805 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.861759 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.861827 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.861848 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.861880 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.861900 5045 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T23:00:32Z","lastTransitionTime":"2025-11-25T23:00:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.932541 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4"] Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.933504 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.935978 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.937249 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.938129 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.939972 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 23:00:32 crc kubenswrapper[5045]: I1125 23:00:32.994858 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=71.994826709 podStartE2EDuration="1m11.994826709s" podCreationTimestamp="2025-11-25 22:59:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:32.990827391 +0000 UTC m=+89.348486533" watchObservedRunningTime="2025-11-25 23:00:32.994826709 +0000 UTC m=+89.352485851" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.011629 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=35.01159889 podStartE2EDuration="35.01159889s" podCreationTimestamp="2025-11-25 22:59:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.011390014 +0000 UTC m=+89.369049166" watchObservedRunningTime="2025-11-25 23:00:33.01159889 +0000 UTC m=+89.369258042" Nov 25 23:00:33 crc 
kubenswrapper[5045]: I1125 23:00:33.034781 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/30013680-17d5-44b8-9077-4579d6ea3647-service-ca\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.034858 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/30013680-17d5-44b8-9077-4579d6ea3647-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.034927 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/30013680-17d5-44b8-9077-4579d6ea3647-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.035024 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30013680-17d5-44b8-9077-4579d6ea3647-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.035106 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30013680-17d5-44b8-9077-4579d6ea3647-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.136193 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/30013680-17d5-44b8-9077-4579d6ea3647-service-ca\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.136251 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/30013680-17d5-44b8-9077-4579d6ea3647-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.136305 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/30013680-17d5-44b8-9077-4579d6ea3647-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc 
kubenswrapper[5045]: I1125 23:00:33.136347 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30013680-17d5-44b8-9077-4579d6ea3647-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.136374 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30013680-17d5-44b8-9077-4579d6ea3647-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.136406 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/30013680-17d5-44b8-9077-4579d6ea3647-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.136869 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/30013680-17d5-44b8-9077-4579d6ea3647-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.140205 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/30013680-17d5-44b8-9077-4579d6ea3647-service-ca\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.144673 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-lbrq8" podStartSLOduration=69.144637818 podStartE2EDuration="1m9.144637818s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.123773167 +0000 UTC m=+89.481432319" watchObservedRunningTime="2025-11-25 23:00:33.144637818 +0000 UTC m=+89.502296970" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.146439 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30013680-17d5-44b8-9077-4579d6ea3647-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.167585 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30013680-17d5-44b8-9077-4579d6ea3647-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-vbct4\" (UID: \"30013680-17d5-44b8-9077-4579d6ea3647\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc 
kubenswrapper[5045]: I1125 23:00:33.186689 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-pqpcg" podStartSLOduration=69.186659889 podStartE2EDuration="1m9.186659889s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.186198225 +0000 UTC m=+89.543857347" watchObservedRunningTime="2025-11-25 23:00:33.186659889 +0000 UTC m=+89.544319031" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.206754 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podStartSLOduration=69.206661765 podStartE2EDuration="1m9.206661765s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.20580418 +0000 UTC m=+89.563463322" watchObservedRunningTime="2025-11-25 23:00:33.206661765 +0000 UTC m=+89.564320907" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.221228 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=12.221199901 podStartE2EDuration="12.221199901s" podCreationTimestamp="2025-11-25 23:00:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.220235513 +0000 UTC m=+89.577894675" watchObservedRunningTime="2025-11-25 23:00:33.221199901 +0000 UTC m=+89.578859023" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.263098 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" Nov 25 23:00:33 crc kubenswrapper[5045]: W1125 23:00:33.290782 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30013680_17d5_44b8_9077_4579d6ea3647.slice/crio-75e449fd7690bb81ce91f660c5747a9fb5b87e1fa0639aba1e165503cdf331d3 WatchSource:0}: Error finding container 75e449fd7690bb81ce91f660c5747a9fb5b87e1fa0639aba1e165503cdf331d3: Status 404 returned error can't find the container with id 75e449fd7690bb81ce91f660c5747a9fb5b87e1fa0639aba1e165503cdf331d3 Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.325461 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-ht6dm" podStartSLOduration=69.325436105 podStartE2EDuration="1m9.325436105s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.290927694 +0000 UTC m=+89.648586816" watchObservedRunningTime="2025-11-25 23:00:33.325436105 +0000 UTC m=+89.683095227" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.367026 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-xmmf5" podStartSLOduration=69.367005573 podStartE2EDuration="1m9.367005573s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.366390775 +0000 UTC m=+89.724049907" watchObservedRunningTime="2025-11-25 23:00:33.367005573 +0000 UTC m=+89.724664695" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.368424 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=68.368413244 podStartE2EDuration="1m8.368413244s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.349540871 +0000 UTC m=+89.707199993" watchObservedRunningTime="2025-11-25 23:00:33.368413244 +0000 UTC m=+89.726072366" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.386584 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6rq7c" podStartSLOduration=68.386551256 podStartE2EDuration="1m8.386551256s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:33.385834684 +0000 UTC m=+89.743493826" watchObservedRunningTime="2025-11-25 23:00:33.386551256 +0000 UTC m=+89.744210408" Nov 25 23:00:33 crc kubenswrapper[5045]: I1125 23:00:33.397603 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:33 crc kubenswrapper[5045]: E1125 23:00:33.397875 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:34 crc kubenswrapper[5045]: I1125 23:00:34.064678 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" event={"ID":"30013680-17d5-44b8-9077-4579d6ea3647","Type":"ContainerStarted","Data":"1660665905bab6e1f90f8969c631ac31d312a0d5951975be7b06a731fe7a60c9"} Nov 25 23:00:34 crc kubenswrapper[5045]: I1125 23:00:34.064839 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" event={"ID":"30013680-17d5-44b8-9077-4579d6ea3647","Type":"ContainerStarted","Data":"75e449fd7690bb81ce91f660c5747a9fb5b87e1fa0639aba1e165503cdf331d3"} Nov 25 23:00:34 crc kubenswrapper[5045]: I1125 23:00:34.086928 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vbct4" podStartSLOduration=70.086905103 podStartE2EDuration="1m10.086905103s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:34.086052819 +0000 UTC m=+90.443711961" watchObservedRunningTime="2025-11-25 23:00:34.086905103 +0000 UTC m=+90.444564225" Nov 25 23:00:34 crc kubenswrapper[5045]: I1125 23:00:34.396678 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:34 crc kubenswrapper[5045]: I1125 23:00:34.396811 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:34 crc kubenswrapper[5045]: E1125 23:00:34.398472 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:34 crc kubenswrapper[5045]: I1125 23:00:34.398530 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:34 crc kubenswrapper[5045]: E1125 23:00:34.398769 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:34 crc kubenswrapper[5045]: E1125 23:00:34.398931 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:35 crc kubenswrapper[5045]: I1125 23:00:35.396419 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:35 crc kubenswrapper[5045]: E1125 23:00:35.396670 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:36 crc kubenswrapper[5045]: I1125 23:00:36.396123 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:36 crc kubenswrapper[5045]: I1125 23:00:36.396165 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:36 crc kubenswrapper[5045]: E1125 23:00:36.396277 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:36 crc kubenswrapper[5045]: I1125 23:00:36.396330 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:36 crc kubenswrapper[5045]: E1125 23:00:36.396539 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:36 crc kubenswrapper[5045]: E1125 23:00:36.396620 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:36 crc kubenswrapper[5045]: I1125 23:00:36.397553 5045 scope.go:117] "RemoveContainer" containerID="167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6" Nov 25 23:00:36 crc kubenswrapper[5045]: E1125 23:00:36.397920 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 23:00:37 crc kubenswrapper[5045]: I1125 23:00:37.396064 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:37 crc kubenswrapper[5045]: E1125 23:00:37.396218 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:38 crc kubenswrapper[5045]: I1125 23:00:38.396456 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:38 crc kubenswrapper[5045]: I1125 23:00:38.396544 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:38 crc kubenswrapper[5045]: E1125 23:00:38.396695 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:38 crc kubenswrapper[5045]: I1125 23:00:38.396777 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:38 crc kubenswrapper[5045]: E1125 23:00:38.396964 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:38 crc kubenswrapper[5045]: E1125 23:00:38.397110 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:39 crc kubenswrapper[5045]: I1125 23:00:39.396058 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:39 crc kubenswrapper[5045]: E1125 23:00:39.396332 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:40 crc kubenswrapper[5045]: I1125 23:00:40.396061 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:40 crc kubenswrapper[5045]: I1125 23:00:40.396076 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:40 crc kubenswrapper[5045]: E1125 23:00:40.396654 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:40 crc kubenswrapper[5045]: I1125 23:00:40.396145 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:40 crc kubenswrapper[5045]: E1125 23:00:40.396859 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:40 crc kubenswrapper[5045]: E1125 23:00:40.397068 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:41 crc kubenswrapper[5045]: I1125 23:00:41.396750 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:41 crc kubenswrapper[5045]: E1125 23:00:41.397838 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:42 crc kubenswrapper[5045]: I1125 23:00:42.396621 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:42 crc kubenswrapper[5045]: I1125 23:00:42.396679 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:42 crc kubenswrapper[5045]: I1125 23:00:42.396741 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:42 crc kubenswrapper[5045]: E1125 23:00:42.396835 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:42 crc kubenswrapper[5045]: E1125 23:00:42.397034 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:42 crc kubenswrapper[5045]: E1125 23:00:42.397149 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:43 crc kubenswrapper[5045]: I1125 23:00:43.263533 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:43 crc kubenswrapper[5045]: E1125 23:00:43.263823 5045 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 23:00:43 crc kubenswrapper[5045]: E1125 23:00:43.263921 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs podName:9e044b50-b07a-44a0-b69f-45fd4392de24 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:47.26389058 +0000 UTC m=+163.621549732 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs") pod "network-metrics-daemon-9rjvw" (UID: "9e044b50-b07a-44a0-b69f-45fd4392de24") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 23:00:43 crc kubenswrapper[5045]: I1125 23:00:43.396510 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:43 crc kubenswrapper[5045]: E1125 23:00:43.396707 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:43 crc kubenswrapper[5045]: I1125 23:00:43.420689 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 25 23:00:44 crc kubenswrapper[5045]: I1125 23:00:44.396293 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:44 crc kubenswrapper[5045]: I1125 23:00:44.396458 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:44 crc kubenswrapper[5045]: I1125 23:00:44.398368 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:44 crc kubenswrapper[5045]: E1125 23:00:44.398344 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:44 crc kubenswrapper[5045]: E1125 23:00:44.398585 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:44 crc kubenswrapper[5045]: E1125 23:00:44.398815 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:44 crc kubenswrapper[5045]: I1125 23:00:44.444197 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=1.44416545 podStartE2EDuration="1.44416545s" podCreationTimestamp="2025-11-25 23:00:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:00:44.437768172 +0000 UTC m=+100.795427324" watchObservedRunningTime="2025-11-25 23:00:44.44416545 +0000 UTC m=+100.801824612" Nov 25 23:00:45 crc kubenswrapper[5045]: I1125 23:00:45.396592 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:45 crc kubenswrapper[5045]: E1125 23:00:45.396842 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:46 crc kubenswrapper[5045]: I1125 23:00:46.396689 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:46 crc kubenswrapper[5045]: I1125 23:00:46.396839 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:46 crc kubenswrapper[5045]: E1125 23:00:46.396950 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:46 crc kubenswrapper[5045]: I1125 23:00:46.396975 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:46 crc kubenswrapper[5045]: E1125 23:00:46.397064 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:46 crc kubenswrapper[5045]: E1125 23:00:46.397274 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:47 crc kubenswrapper[5045]: I1125 23:00:47.396123 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:47 crc kubenswrapper[5045]: E1125 23:00:47.396325 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:48 crc kubenswrapper[5045]: I1125 23:00:48.396476 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:48 crc kubenswrapper[5045]: I1125 23:00:48.396598 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:48 crc kubenswrapper[5045]: E1125 23:00:48.396664 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:48 crc kubenswrapper[5045]: I1125 23:00:48.396693 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:48 crc kubenswrapper[5045]: E1125 23:00:48.396880 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:48 crc kubenswrapper[5045]: E1125 23:00:48.397172 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:49 crc kubenswrapper[5045]: I1125 23:00:49.396775 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:49 crc kubenswrapper[5045]: E1125 23:00:49.396973 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:49 crc kubenswrapper[5045]: I1125 23:00:49.398360 5045 scope.go:117] "RemoveContainer" containerID="167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6" Nov 25 23:00:49 crc kubenswrapper[5045]: E1125 23:00:49.398696 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 23:00:50 crc kubenswrapper[5045]: I1125 23:00:50.395979 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:50 crc kubenswrapper[5045]: I1125 23:00:50.396134 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:50 crc kubenswrapper[5045]: E1125 23:00:50.396250 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:50 crc kubenswrapper[5045]: I1125 23:00:50.396261 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:50 crc kubenswrapper[5045]: E1125 23:00:50.396400 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:50 crc kubenswrapper[5045]: E1125 23:00:50.396592 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:51 crc kubenswrapper[5045]: I1125 23:00:51.395597 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:51 crc kubenswrapper[5045]: E1125 23:00:51.395804 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:52 crc kubenswrapper[5045]: I1125 23:00:52.396313 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:52 crc kubenswrapper[5045]: I1125 23:00:52.396391 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:52 crc kubenswrapper[5045]: E1125 23:00:52.396488 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:52 crc kubenswrapper[5045]: I1125 23:00:52.396319 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:52 crc kubenswrapper[5045]: E1125 23:00:52.396672 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:52 crc kubenswrapper[5045]: E1125 23:00:52.397006 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:53 crc kubenswrapper[5045]: I1125 23:00:53.395840 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:53 crc kubenswrapper[5045]: E1125 23:00:53.396029 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:54 crc kubenswrapper[5045]: I1125 23:00:54.397029 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:54 crc kubenswrapper[5045]: I1125 23:00:54.397195 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:54 crc kubenswrapper[5045]: E1125 23:00:54.399013 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:54 crc kubenswrapper[5045]: I1125 23:00:54.399086 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:54 crc kubenswrapper[5045]: E1125 23:00:54.399486 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:54 crc kubenswrapper[5045]: E1125 23:00:54.399625 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:55 crc kubenswrapper[5045]: I1125 23:00:55.395673 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:55 crc kubenswrapper[5045]: E1125 23:00:55.395906 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:56 crc kubenswrapper[5045]: I1125 23:00:56.395996 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:56 crc kubenswrapper[5045]: I1125 23:00:56.396047 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:56 crc kubenswrapper[5045]: I1125 23:00:56.395994 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:56 crc kubenswrapper[5045]: E1125 23:00:56.396213 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:56 crc kubenswrapper[5045]: E1125 23:00:56.396332 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:56 crc kubenswrapper[5045]: E1125 23:00:56.396556 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:57 crc kubenswrapper[5045]: I1125 23:00:57.396021 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:57 crc kubenswrapper[5045]: E1125 23:00:57.396193 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:00:58 crc kubenswrapper[5045]: I1125 23:00:58.395647 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:00:58 crc kubenswrapper[5045]: I1125 23:00:58.395745 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:00:58 crc kubenswrapper[5045]: I1125 23:00:58.395805 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:00:58 crc kubenswrapper[5045]: E1125 23:00:58.395871 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:00:58 crc kubenswrapper[5045]: E1125 23:00:58.396005 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:00:58 crc kubenswrapper[5045]: E1125 23:00:58.396164 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:00:59 crc kubenswrapper[5045]: I1125 23:00:59.395480 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:00:59 crc kubenswrapper[5045]: E1125 23:00:59.395932 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:00 crc kubenswrapper[5045]: I1125 23:01:00.395781 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:00 crc kubenswrapper[5045]: E1125 23:01:00.395988 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:00 crc kubenswrapper[5045]: I1125 23:01:00.396080 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:00 crc kubenswrapper[5045]: E1125 23:01:00.396528 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:00 crc kubenswrapper[5045]: I1125 23:01:00.396946 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:00 crc kubenswrapper[5045]: E1125 23:01:00.397143 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:01 crc kubenswrapper[5045]: I1125 23:01:01.396611 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:01 crc kubenswrapper[5045]: E1125 23:01:01.396898 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.172547 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/1.log" Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.173612 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/0.log" Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.173699 5045 generic.go:334] "Generic (PLEG): container finished" podID="e971a47d-97d5-4a21-a255-2497b2b3cbbc" containerID="bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3" exitCode=1 Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.173775 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-ht6dm" event={"ID":"e971a47d-97d5-4a21-a255-2497b2b3cbbc","Type":"ContainerDied","Data":"bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3"} Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.173821 5045 scope.go:117] "RemoveContainer" containerID="69033179457f0fa15a666139b70f14b70ac1ed576f3dc63846a1c5e2ee325ee5" Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.174403 5045 scope.go:117] "RemoveContainer" containerID="bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3" Nov 25 23:01:02 crc kubenswrapper[5045]: E1125 23:01:02.174656 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-ht6dm_openshift-multus(e971a47d-97d5-4a21-a255-2497b2b3cbbc)\"" pod="openshift-multus/multus-ht6dm" podUID="e971a47d-97d5-4a21-a255-2497b2b3cbbc" Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.395884 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.396117 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:02 crc kubenswrapper[5045]: E1125 23:01:02.396121 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.396201 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:02 crc kubenswrapper[5045]: E1125 23:01:02.396440 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:02 crc kubenswrapper[5045]: E1125 23:01:02.396917 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:02 crc kubenswrapper[5045]: I1125 23:01:02.398929 5045 scope.go:117] "RemoveContainer" containerID="167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6" Nov 25 23:01:02 crc kubenswrapper[5045]: E1125 23:01:02.399360 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mrsr4_openshift-ovn-kubernetes(0f81194f-4d48-4be6-9f73-8b34ed6b56cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" Nov 25 23:01:03 crc kubenswrapper[5045]: I1125 23:01:03.180658 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/1.log" Nov 25 23:01:03 crc kubenswrapper[5045]: I1125 23:01:03.395854 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:03 crc kubenswrapper[5045]: E1125 23:01:03.396038 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:04 crc kubenswrapper[5045]: E1125 23:01:04.323938 5045 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 25 23:01:04 crc kubenswrapper[5045]: I1125 23:01:04.396412 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:04 crc kubenswrapper[5045]: I1125 23:01:04.396605 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:04 crc kubenswrapper[5045]: I1125 23:01:04.398603 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:04 crc kubenswrapper[5045]: E1125 23:01:04.398604 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:04 crc kubenswrapper[5045]: E1125 23:01:04.398786 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:04 crc kubenswrapper[5045]: E1125 23:01:04.398917 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:04 crc kubenswrapper[5045]: E1125 23:01:04.515797 5045 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 23:01:05 crc kubenswrapper[5045]: I1125 23:01:05.395924 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:05 crc kubenswrapper[5045]: E1125 23:01:05.396124 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:06 crc kubenswrapper[5045]: I1125 23:01:06.396400 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:06 crc kubenswrapper[5045]: I1125 23:01:06.396476 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:06 crc kubenswrapper[5045]: I1125 23:01:06.396530 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:06 crc kubenswrapper[5045]: E1125 23:01:06.398339 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:06 crc kubenswrapper[5045]: E1125 23:01:06.399246 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:06 crc kubenswrapper[5045]: E1125 23:01:06.399589 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:07 crc kubenswrapper[5045]: I1125 23:01:07.395566 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:07 crc kubenswrapper[5045]: E1125 23:01:07.395783 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:08 crc kubenswrapper[5045]: I1125 23:01:08.396366 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:08 crc kubenswrapper[5045]: I1125 23:01:08.396372 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:08 crc kubenswrapper[5045]: E1125 23:01:08.396599 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:08 crc kubenswrapper[5045]: I1125 23:01:08.396375 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:08 crc kubenswrapper[5045]: E1125 23:01:08.396882 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:08 crc kubenswrapper[5045]: E1125 23:01:08.396974 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:09 crc kubenswrapper[5045]: I1125 23:01:09.396581 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:09 crc kubenswrapper[5045]: E1125 23:01:09.396831 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:09 crc kubenswrapper[5045]: E1125 23:01:09.517251 5045 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 23:01:10 crc kubenswrapper[5045]: I1125 23:01:10.396422 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:10 crc kubenswrapper[5045]: I1125 23:01:10.396495 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:10 crc kubenswrapper[5045]: E1125 23:01:10.397022 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:10 crc kubenswrapper[5045]: I1125 23:01:10.396645 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:10 crc kubenswrapper[5045]: E1125 23:01:10.397155 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:10 crc kubenswrapper[5045]: E1125 23:01:10.397299 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:11 crc kubenswrapper[5045]: I1125 23:01:11.396422 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:11 crc kubenswrapper[5045]: E1125 23:01:11.396605 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:12 crc kubenswrapper[5045]: I1125 23:01:12.395745 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:12 crc kubenswrapper[5045]: I1125 23:01:12.395812 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:12 crc kubenswrapper[5045]: E1125 23:01:12.395968 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:12 crc kubenswrapper[5045]: I1125 23:01:12.395994 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:12 crc kubenswrapper[5045]: E1125 23:01:12.396146 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:12 crc kubenswrapper[5045]: E1125 23:01:12.396296 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:13 crc kubenswrapper[5045]: I1125 23:01:13.396364 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:13 crc kubenswrapper[5045]: E1125 23:01:13.396563 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:14 crc kubenswrapper[5045]: I1125 23:01:14.396649 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:14 crc kubenswrapper[5045]: I1125 23:01:14.396804 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:14 crc kubenswrapper[5045]: E1125 23:01:14.399209 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:14 crc kubenswrapper[5045]: I1125 23:01:14.399236 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:14 crc kubenswrapper[5045]: E1125 23:01:14.399335 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:14 crc kubenswrapper[5045]: E1125 23:01:14.399485 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:14 crc kubenswrapper[5045]: E1125 23:01:14.518623 5045 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 23:01:15 crc kubenswrapper[5045]: I1125 23:01:15.396686 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:15 crc kubenswrapper[5045]: I1125 23:01:15.397526 5045 scope.go:117] "RemoveContainer" containerID="bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3" Nov 25 23:01:15 crc kubenswrapper[5045]: E1125 23:01:15.397574 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:16 crc kubenswrapper[5045]: I1125 23:01:16.233033 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/1.log" Nov 25 23:01:16 crc kubenswrapper[5045]: I1125 23:01:16.233180 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-ht6dm" event={"ID":"e971a47d-97d5-4a21-a255-2497b2b3cbbc","Type":"ContainerStarted","Data":"864184c90b5847350d56d3814873c9e37d409583f933093bca4086657a5f1b17"} Nov 25 23:01:16 crc kubenswrapper[5045]: I1125 23:01:16.396557 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:16 crc kubenswrapper[5045]: I1125 23:01:16.396557 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:16 crc kubenswrapper[5045]: E1125 23:01:16.396773 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:16 crc kubenswrapper[5045]: E1125 23:01:16.397041 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:16 crc kubenswrapper[5045]: I1125 23:01:16.397519 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:16 crc kubenswrapper[5045]: E1125 23:01:16.397905 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:17 crc kubenswrapper[5045]: I1125 23:01:17.396563 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:17 crc kubenswrapper[5045]: E1125 23:01:17.397130 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:17 crc kubenswrapper[5045]: I1125 23:01:17.397589 5045 scope.go:117] "RemoveContainer" containerID="167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6" Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.243148 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/3.log" Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.246349 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerStarted","Data":"465de3e412536c5c0b9115d77b543fd35888b34050cc3ab59f7c63fed4418e93"} Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.246782 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.290051 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podStartSLOduration=114.290023287 podStartE2EDuration="1m54.290023287s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:18.289491481 +0000 UTC m=+134.647150603" watchObservedRunningTime="2025-11-25 23:01:18.290023287 +0000 UTC m=+134.647682429" Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.374611 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-9rjvw"] Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.374794 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:18 crc kubenswrapper[5045]: E1125 23:01:18.374946 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.395821 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.395910 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:18 crc kubenswrapper[5045]: E1125 23:01:18.395985 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:18 crc kubenswrapper[5045]: I1125 23:01:18.396171 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:18 crc kubenswrapper[5045]: E1125 23:01:18.396298 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:18 crc kubenswrapper[5045]: E1125 23:01:18.396548 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:19 crc kubenswrapper[5045]: E1125 23:01:19.519702 5045 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 23:01:20 crc kubenswrapper[5045]: I1125 23:01:20.396281 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:20 crc kubenswrapper[5045]: I1125 23:01:20.396431 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:20 crc kubenswrapper[5045]: I1125 23:01:20.396306 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:20 crc kubenswrapper[5045]: I1125 23:01:20.396567 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:20 crc kubenswrapper[5045]: E1125 23:01:20.396529 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:20 crc kubenswrapper[5045]: E1125 23:01:20.396659 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:20 crc kubenswrapper[5045]: E1125 23:01:20.396849 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:20 crc kubenswrapper[5045]: E1125 23:01:20.397004 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:22 crc kubenswrapper[5045]: I1125 23:01:22.395917 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:22 crc kubenswrapper[5045]: I1125 23:01:22.395992 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:22 crc kubenswrapper[5045]: E1125 23:01:22.396090 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:22 crc kubenswrapper[5045]: I1125 23:01:22.396170 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:22 crc kubenswrapper[5045]: I1125 23:01:22.396209 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:22 crc kubenswrapper[5045]: E1125 23:01:22.396293 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:22 crc kubenswrapper[5045]: E1125 23:01:22.396419 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:22 crc kubenswrapper[5045]: E1125 23:01:22.396482 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:24 crc kubenswrapper[5045]: I1125 23:01:24.397016 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:24 crc kubenswrapper[5045]: E1125 23:01:24.398557 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 23:01:24 crc kubenswrapper[5045]: I1125 23:01:24.398920 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:24 crc kubenswrapper[5045]: I1125 23:01:24.398980 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:24 crc kubenswrapper[5045]: E1125 23:01:24.399088 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9rjvw" podUID="9e044b50-b07a-44a0-b69f-45fd4392de24" Nov 25 23:01:24 crc kubenswrapper[5045]: I1125 23:01:24.399354 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:24 crc kubenswrapper[5045]: E1125 23:01:24.399437 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 23:01:24 crc kubenswrapper[5045]: E1125 23:01:24.399583 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.396074 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.396418 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.396947 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.396948 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.399137 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.399943 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.399961 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.400079 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.400137 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 23:01:26 crc kubenswrapper[5045]: I1125 23:01:26.400416 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 23:01:30 crc kubenswrapper[5045]: I1125 23:01:30.541604 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:01:30 crc kubenswrapper[5045]: I1125 23:01:30.541699 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.123940 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:32 crc kubenswrapper[5045]: E1125 23:01:32.124073 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:03:34.124039795 +0000 UTC m=+270.481698947 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.124170 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.124222 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.125605 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.132484 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.161266 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.225420 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.225530 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.229533 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.231804 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.430966 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 23:01:32 crc kubenswrapper[5045]: I1125 23:01:32.447348 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:32 crc kubenswrapper[5045]: W1125 23:01:32.473098 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-6ff4ad901fc5946e46bd220f6d38aed76d4671d4043acd7358e3bb759c68e199 WatchSource:0}: Error finding container 6ff4ad901fc5946e46bd220f6d38aed76d4671d4043acd7358e3bb759c68e199: Status 404 returned error can't find the container with id 6ff4ad901fc5946e46bd220f6d38aed76d4671d4043acd7358e3bb759c68e199 Nov 25 23:01:32 crc kubenswrapper[5045]: W1125 23:01:32.742456 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-f2bb1e49ff890d88dcdc24826d181048974428101ae559aa9b393d15aac2eefe WatchSource:0}: Error finding container f2bb1e49ff890d88dcdc24826d181048974428101ae559aa9b393d15aac2eefe: Status 404 returned error can't find the container with id f2bb1e49ff890d88dcdc24826d181048974428101ae559aa9b393d15aac2eefe Nov 25 23:01:33 crc kubenswrapper[5045]: I1125 23:01:33.309078 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"d3876cb06773425497ab0bb2791007e477836f7750289e2de1e3bf81fd05fe79"} Nov 25 23:01:33 crc kubenswrapper[5045]: I1125 23:01:33.309154 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"61b1d7e2288342198fbd25328420c69b3ee2ee60f6342238a25c9aa964bb78cc"} Nov 25 23:01:33 crc kubenswrapper[5045]: I1125 23:01:33.309389 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 23:01:33 crc kubenswrapper[5045]: I1125 23:01:33.313848 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"d9ea75c8435618d0e76c9b8d725410c271b2394d4531fa56fb9cebd91ee4cc47"} Nov 25 23:01:33 crc kubenswrapper[5045]: I1125 23:01:33.313994 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6ff4ad901fc5946e46bd220f6d38aed76d4671d4043acd7358e3bb759c68e199"} Nov 25 23:01:33 crc kubenswrapper[5045]: I1125 23:01:33.316950 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"bbc093396a67e76edce62d84157e3ea037b78f2b00f842dab5838c4a0dcee91e"} Nov 25 23:01:33 crc kubenswrapper[5045]: I1125 23:01:33.317019 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f2bb1e49ff890d88dcdc24826d181048974428101ae559aa9b393d15aac2eefe"} Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.259960 5045 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeReady" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.310848 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dlljd"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.311484 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.312029 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-mnm5x"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.312345 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.312808 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.313139 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.313806 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tst4h"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.314328 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.318439 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.318479 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.318520 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.318742 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.318835 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.318860 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.318968 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 23:01:34 crc kubenswrapper[5045]: W1125 23:01:34.318991 5045 reflector.go:561] object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c": failed to list *v1.Secret: secrets "openshift-controller-manager-sa-dockercfg-msq4c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.319031 5045 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-msq4c\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-controller-manager-sa-dockercfg-msq4c\" is 
forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 23:01:34 crc kubenswrapper[5045]: W1125 23:01:34.320983 5045 reflector.go:561] object-"openshift-controller-manager"/"client-ca": failed to list *v1.ConfigMap: configmaps "client-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.321052 5045 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"client-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"client-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.323184 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.323790 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: W1125 23:01:34.330684 5045 reflector.go:561] object-"openshift-controller-manager"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.330752 5045 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.330695 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 23:01:34 crc kubenswrapper[5045]: W1125 23:01:34.331074 5045 reflector.go:561] object-"openshift-controller-manager"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.331101 5045 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 23:01:34 crc kubenswrapper[5045]: W1125 23:01:34.331320 5045 reflector.go:561] 
object-"openshift-controller-manager"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.331361 5045 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.331749 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: W1125 23:01:34.332059 5045 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config": failed to list *v1.ConfigMap: configmaps "openshift-apiserver-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.332104 5045 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-apiserver-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 23:01:34 crc kubenswrapper[5045]: W1125 23:01:34.332814 5045 reflector.go:561] object-"openshift-controller-manager"/"config": failed to list *v1.ConfigMap: configmaps "config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.332844 5045 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.332872 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.333393 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.333466 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.333868 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.347502 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.347939 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.348254 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.348297 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.348367 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.348507 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.348576 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.348903 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.348958 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.349086 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.349103 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.349201 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.349252 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.349220 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.350321 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.351134 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4rwm4"] Nov 25 23:01:34 crc 
kubenswrapper[5045]: I1125 23:01:34.351774 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.351940 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.352204 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.352610 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.352901 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.365452 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.366136 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.366441 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.366579 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.369214 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q6hcb"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.370029 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.370438 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.370803 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2btnr"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.370886 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.371094 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.371329 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.371358 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.371544 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.371545 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.371974 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.372048 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.372054 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.372064 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.375039 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.377688 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.377863 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.382471 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.382630 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.382774 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.382859 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.383940 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.392694 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-gmw7c"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.393172 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.403878 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.404299 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.404602 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.404781 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.404897 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.404998 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405035 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405104 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405202 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405269 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405334 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405446 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405549 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405627 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405695 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405627 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405699 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405806 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405897 5045 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.405988 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.406010 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.406062 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.406650 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-ws5pv"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.406996 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-9cmr2"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.407277 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.407475 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-ws5pv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.412978 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.415031 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.415067 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.417478 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.417944 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.418529 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.436071 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.437365 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.440647 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.441179 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.441386 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.441955 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.442097 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.442237 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.442479 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.442642 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.450148 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.452665 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.453212 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.490542 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.490867 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491035 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491070 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491183 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491239 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491254 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491264 5045 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491306 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491339 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-config\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491360 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0079ce5c-97a3-43a2-8b93-df87ee4de76b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491377 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-client-ca\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491390 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491041 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491490 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491524 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491837 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492166 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492230 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492168 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492438 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-jjrzz"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492444 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.491391 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601df050-5421-4266-bf7c-60096a066a24-config\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492514 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b81392a7-2366-421d-834c-d72869a34014-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fgz2j\" (UID: \"b81392a7-2366-421d-834c-d72869a34014\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492532 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6x5j\" (UniqueName: \"kubernetes.io/projected/c9443fc5-a284-4838-a107-6146af9d6bba-kube-api-access-v6x5j\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492551 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-serving-cert\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492574 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2v69\" (UniqueName: \"kubernetes.io/projected/299d494f-f519-4793-b234-6fa6174e9428-kube-api-access-z2v69\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492590 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-image-import-ca\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492609 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0ec8737e-20e0-4f51-b134-a60be096e1df-audit-dir\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492628 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/859fc901-fe58-44e6-b133-9da8193dd02f-trusted-ca\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492648 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e266b78-e9fa-40bf-844b-7d5e273e988b-serving-cert\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492665 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9w77\" (UniqueName: \"kubernetes.io/projected/4e266b78-e9fa-40bf-844b-7d5e273e988b-kube-api-access-w9w77\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492682 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492699 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/601df050-5421-4266-bf7c-60096a066a24-images\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492730 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-485bw\" (UniqueName: \"kubernetes.io/projected/b81392a7-2366-421d-834c-d72869a34014-kube-api-access-485bw\") pod \"cluster-samples-operator-665b6dd947-fgz2j\" (UID: \"b81392a7-2366-421d-834c-d72869a34014\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492748 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/601df050-5421-4266-bf7c-60096a066a24-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492762 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-dvctf\" (UniqueName: \"kubernetes.io/projected/601df050-5421-4266-bf7c-60096a066a24-kube-api-access-dvctf\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492781 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492797 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-encryption-config\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492812 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwfj4\" (UniqueName: \"kubernetes.io/projected/0ec8737e-20e0-4f51-b134-a60be096e1df-kube-api-access-xwfj4\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492832 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/299d494f-f519-4793-b234-6fa6174e9428-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492849 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/148a8bcd-f16e-4a15-89a2-61a79074bdf4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492867 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-config\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492883 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/859fc901-fe58-44e6-b133-9da8193dd02f-config\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492897 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492913 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-etcd-client\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492928 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-serving-cert\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492956 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-config\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492977 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpcw5\" (UniqueName: \"kubernetes.io/projected/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-kube-api-access-qpcw5\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.492994 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/148a8bcd-f16e-4a15-89a2-61a79074bdf4-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493009 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493031 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/299d494f-f519-4793-b234-6fa6174e9428-serving-cert\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493048 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7d8q\" (UniqueName: 
\"kubernetes.io/projected/0079ce5c-97a3-43a2-8b93-df87ee4de76b-kube-api-access-l7d8q\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493062 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-audit\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493081 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njf66\" (UniqueName: \"kubernetes.io/projected/859fc901-fe58-44e6-b133-9da8193dd02f-kube-api-access-njf66\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493099 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrsmg\" (UniqueName: \"kubernetes.io/projected/148a8bcd-f16e-4a15-89a2-61a79074bdf4-kube-api-access-zrsmg\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493114 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-service-ca-bundle\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493135 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493152 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/0ec8737e-20e0-4f51-b134-a60be096e1df-node-pullsecrets\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493167 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/859fc901-fe58-44e6-b133-9da8193dd02f-serving-cert\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493191 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/148a8bcd-f16e-4a15-89a2-61a79074bdf4-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493206 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493221 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-etcd-serving-ca\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493478 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.493773 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.494028 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.495426 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-84zml"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.495693 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.495744 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.495818 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.496480 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.496551 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.496490 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-ml4r4"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.496851 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.496964 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.497386 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-mnm5x"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.498228 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.498851 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.503086 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tst4h"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.503172 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.504548 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.505807 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dlljd"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.506751 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.507199 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.507538 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.507777 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.508173 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.507782 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.511395 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.511688 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.511997 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.512148 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.514205 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.515226 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.515576 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.519304 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-tnf9q"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.519697 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.533048 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.533166 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.534798 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-nlsrs"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.535034 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.535150 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v9nc5"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.535383 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.536165 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.536964 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.537590 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.542129 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xz98p"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.544369 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.544813 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.545531 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.545951 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.547355 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.550338 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4rwm4"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.551927 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q6hcb"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.552878 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-hbpfm"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.553621 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.553854 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-jjrzz"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.554771 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-9cmr2"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.557037 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-ws5pv"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.558334 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.559008 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.559840 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.561141 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.561200 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.561844 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.562807 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-gmw7c"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.564298 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6"] Nov 25 23:01:34 crc 
kubenswrapper[5045]: I1125 23:01:34.565628 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.566685 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2btnr"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.567915 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.568897 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-ml4r4"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.570848 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.570913 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-hbpfm"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.571968 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v9nc5"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.573046 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.574078 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-dgl6n"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.574666 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.574800 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.575049 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.576249 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.577348 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.578371 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.579519 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.581094 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-nlsrs"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.582035 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.583022 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-marketplace/marketplace-operator-79b997595-84zml"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.584154 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xz98p"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.584972 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-z5ldf"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.585577 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.585902 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wxk7s"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.586855 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.588349 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wxk7s"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.588578 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-z5ldf"] Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593172 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593586 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-serving-cert\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593614 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/859fc901-fe58-44e6-b133-9da8193dd02f-trusted-ca\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593635 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2v69\" (UniqueName: \"kubernetes.io/projected/299d494f-f519-4793-b234-6fa6174e9428-kube-api-access-z2v69\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593654 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-image-import-ca\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593670 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0ec8737e-20e0-4f51-b134-a60be096e1df-audit-dir\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " 
pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593690 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593724 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593744 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-service-ca\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593759 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593792 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e266b78-e9fa-40bf-844b-7d5e273e988b-serving-cert\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593810 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9w77\" (UniqueName: \"kubernetes.io/projected/4e266b78-e9fa-40bf-844b-7d5e273e988b-kube-api-access-w9w77\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593825 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/601df050-5421-4266-bf7c-60096a066a24-images\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593840 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvctf\" (UniqueName: \"kubernetes.io/projected/601df050-5421-4266-bf7c-60096a066a24-kube-api-access-dvctf\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc 
kubenswrapper[5045]: I1125 23:01:34.593858 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-485bw\" (UniqueName: \"kubernetes.io/projected/b81392a7-2366-421d-834c-d72869a34014-kube-api-access-485bw\") pod \"cluster-samples-operator-665b6dd947-fgz2j\" (UID: \"b81392a7-2366-421d-834c-d72869a34014\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593875 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/601df050-5421-4266-bf7c-60096a066a24-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593895 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk57t\" (UniqueName: \"kubernetes.io/projected/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-kube-api-access-fk57t\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593913 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593928 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-encryption-config\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593945 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593962 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwfj4\" (UniqueName: \"kubernetes.io/projected/0ec8737e-20e0-4f51-b134-a60be096e1df-kube-api-access-xwfj4\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593979 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.593997 5045 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-oauth-serving-cert\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594014 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-audit-policies\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594032 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f848dbf6-817b-44d7-b410-7ac266166501-audit-dir\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594049 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2rd4\" (UniqueName: \"kubernetes.io/projected/e59f0f14-0c21-43c9-baaa-bf860aaa16b3-kube-api-access-g2rd4\") pod \"downloads-7954f5f757-ws5pv\" (UID: \"e59f0f14-0c21-43c9-baaa-bf860aaa16b3\") " pod="openshift-console/downloads-7954f5f757-ws5pv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594066 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-encryption-config\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594083 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/299d494f-f519-4793-b234-6fa6174e9428-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594102 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqwlr\" (UniqueName: \"kubernetes.io/projected/50babc27-1757-4498-abd8-0fd5a1033b80-kube-api-access-dqwlr\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594122 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/148a8bcd-f16e-4a15-89a2-61a79074bdf4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594139 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/859fc901-fe58-44e6-b133-9da8193dd02f-config\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594154 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594171 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-etcd-client\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594187 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-serving-cert\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594203 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-config\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594220 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-serving-cert\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594235 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594259 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-config\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594274 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-config\") pod \"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 
crc kubenswrapper[5045]: I1125 23:01:34.594290 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594309 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpcw5\" (UniqueName: \"kubernetes.io/projected/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-kube-api-access-qpcw5\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594324 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594342 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/148a8bcd-f16e-4a15-89a2-61a79074bdf4-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594358 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594376 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c6e0f76-e954-4f89-a2c8-fccbe4440171-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594394 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/299d494f-f519-4793-b234-6fa6174e9428-serving-cert\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594411 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594428 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594443 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-audit\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594462 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7d8q\" (UniqueName: \"kubernetes.io/projected/0079ce5c-97a3-43a2-8b93-df87ee4de76b-kube-api-access-l7d8q\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594480 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594496 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-trusted-ca-bundle\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594511 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldfmv\" (UniqueName: \"kubernetes.io/projected/f848dbf6-817b-44d7-b410-7ac266166501-kube-api-access-ldfmv\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594530 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njf66\" (UniqueName: \"kubernetes.io/projected/859fc901-fe58-44e6-b133-9da8193dd02f-kube-api-access-njf66\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594545 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594565 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrsmg\" (UniqueName: \"kubernetes.io/projected/148a8bcd-f16e-4a15-89a2-61a79074bdf4-kube-api-access-zrsmg\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594582 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-service-ca-bundle\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594597 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-config\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594613 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594629 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-audit-policies\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594646 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/0ec8737e-20e0-4f51-b134-a60be096e1df-node-pullsecrets\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594661 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-etcd-client\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594678 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/148a8bcd-f16e-4a15-89a2-61a79074bdf4-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594693 5045 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/859fc901-fe58-44e6-b133-9da8193dd02f-serving-cert\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594722 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594740 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594757 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594772 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-etcd-serving-ca\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594788 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-oauth-config\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594804 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c6e0f76-e954-4f89-a2c8-fccbe4440171-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594820 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/50babc27-1757-4498-abd8-0fd5a1033b80-audit-dir\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594836 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594854 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-config\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594871 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594887 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594904 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh84h\" (UniqueName: \"kubernetes.io/projected/3c6e0f76-e954-4f89-a2c8-fccbe4440171-kube-api-access-hh84h\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594914 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/859fc901-fe58-44e6-b133-9da8193dd02f-trusted-ca\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594920 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0079ce5c-97a3-43a2-8b93-df87ee4de76b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594971 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-client-ca\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.594995 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" 
(UniqueName: \"kubernetes.io/secret/b81392a7-2366-421d-834c-d72869a34014-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fgz2j\" (UID: \"b81392a7-2366-421d-834c-d72869a34014\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.595015 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6x5j\" (UniqueName: \"kubernetes.io/projected/c9443fc5-a284-4838-a107-6146af9d6bba-kube-api-access-v6x5j\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.595135 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601df050-5421-4266-bf7c-60096a066a24-config\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.595157 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-serving-cert\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.595955 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-client-ca\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.596914 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/299d494f-f519-4793-b234-6fa6174e9428-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.597185 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601df050-5421-4266-bf7c-60096a066a24-config\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.597273 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/859fc901-fe58-44e6-b133-9da8193dd02f-config\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.597566 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/601df050-5421-4266-bf7c-60096a066a24-images\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.597893 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-config\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.597965 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-audit\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.598100 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.598523 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-etcd-serving-ca\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.598580 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/0ec8737e-20e0-4f51-b134-a60be096e1df-node-pullsecrets\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.598725 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0079ce5c-97a3-43a2-8b93-df87ee4de76b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.599228 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-config\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.599285 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-service-ca-bundle\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.599612 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/148a8bcd-f16e-4a15-89a2-61a79074bdf4-trusted-ca\") pod 
\"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.599689 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0ec8737e-20e0-4f51-b134-a60be096e1df-audit-dir\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.600020 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-config\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.600126 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-image-import-ca\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.601107 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.601129 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-serving-cert\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.601604 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/859fc901-fe58-44e6-b133-9da8193dd02f-serving-cert\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.600131 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0ec8737e-20e0-4f51-b134-a60be096e1df-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.601731 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-encryption-config\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.601975 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/148a8bcd-f16e-4a15-89a2-61a79074bdf4-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.602023 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/601df050-5421-4266-bf7c-60096a066a24-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.602033 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-etcd-client\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.602235 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e266b78-e9fa-40bf-844b-7d5e273e988b-serving-cert\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.609306 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ec8737e-20e0-4f51-b134-a60be096e1df-serving-cert\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.610184 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b81392a7-2366-421d-834c-d72869a34014-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fgz2j\" (UID: \"b81392a7-2366-421d-834c-d72869a34014\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.610867 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/299d494f-f519-4793-b234-6fa6174e9428-serving-cert\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.613659 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.633629 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.653502 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.672963 5045 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.693438 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.696173 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.696766 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.696794 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-oauth-config\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.696827 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c6e0f76-e954-4f89-a2c8-fccbe4440171-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.696845 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/50babc27-1757-4498-abd8-0fd5a1033b80-audit-dir\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697103 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/50babc27-1757-4498-abd8-0fd5a1033b80-audit-dir\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697181 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697202 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697219 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh84h\" (UniqueName: \"kubernetes.io/projected/3c6e0f76-e954-4f89-a2c8-fccbe4440171-kube-api-access-hh84h\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697242 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-serving-cert\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697278 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697301 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697322 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-service-ca\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697366 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk57t\" (UniqueName: \"kubernetes.io/projected/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-kube-api-access-fk57t\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697383 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697442 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-serving-cert\") pod 
\"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697460 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-oauth-serving-cert\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697477 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f848dbf6-817b-44d7-b410-7ac266166501-audit-dir\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697495 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2rd4\" (UniqueName: \"kubernetes.io/projected/e59f0f14-0c21-43c9-baaa-bf860aaa16b3-kube-api-access-g2rd4\") pod \"downloads-7954f5f757-ws5pv\" (UID: \"e59f0f14-0c21-43c9-baaa-bf860aaa16b3\") " pod="openshift-console/downloads-7954f5f757-ws5pv" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697512 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-encryption-config\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697529 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-audit-policies\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697545 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqwlr\" (UniqueName: \"kubernetes.io/projected/50babc27-1757-4498-abd8-0fd5a1033b80-kube-api-access-dqwlr\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697566 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-serving-cert\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697582 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697599 5045 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-config\") pod \"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697623 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697632 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c6e0f76-e954-4f89-a2c8-fccbe4440171-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697640 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697738 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c6e0f76-e954-4f89-a2c8-fccbe4440171-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697771 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697794 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697838 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697861 5045 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697882 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-trusted-ca-bundle\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697921 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697948 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldfmv\" (UniqueName: \"kubernetes.io/projected/f848dbf6-817b-44d7-b410-7ac266166501-kube-api-access-ldfmv\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.697984 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-config\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.698008 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-audit-policies\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.698031 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-etcd-client\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.698740 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f848dbf6-817b-44d7-b410-7ac266166501-audit-dir\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.698875 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-service-ca\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 
crc kubenswrapper[5045]: I1125 23:01:34.699049 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-audit-policies\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.699261 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-oauth-serving-cert\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.699830 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-config\") pod \"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.700127 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.700313 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.700420 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.700802 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-trusted-ca-bundle\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.701279 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.701324 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-config\") pod \"console-f9d7485db-gmw7c\" (UID: 
\"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.701383 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.701748 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-serving-cert\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.701906 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/50babc27-1757-4498-abd8-0fd5a1033b80-audit-policies\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.702053 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.702380 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.702763 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.703137 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-serving-cert\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.703176 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-encryption-config\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.703878 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.704333 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.704516 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-oauth-config\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.705239 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.705419 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.705492 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/50babc27-1757-4498-abd8-0fd5a1033b80-etcd-client\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.707138 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c6e0f76-e954-4f89-a2c8-fccbe4440171-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.707498 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.754022 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.773872 5045 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.794084 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.798607 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.798691 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-bound-sa-token\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.798741 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c46e6576-9c19-4782-9466-baeb95106d1f-proxy-tls\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.798821 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c46e6576-9c19-4782-9466-baeb95106d1f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.798852 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-trusted-ca\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.798868 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/96f7b7cd-943f-4725-8cdb-9b411455cf64-proxy-tls\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.798929 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/96f7b7cd-943f-4725-8cdb-9b411455cf64-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.799118 5045 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/96f7b7cd-943f-4725-8cdb-9b411455cf64-images\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.799194 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e953287-8cf8-4561-8a48-731746910551-ca-trust-extracted\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.799215 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvrsn\" (UniqueName: \"kubernetes.io/projected/96f7b7cd-943f-4725-8cdb-9b411455cf64-kube-api-access-bvrsn\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.799273 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-registry-tls\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.799294 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52r6g\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-kube-api-access-52r6g\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.799336 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-registry-certificates\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.799475 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e953287-8cf8-4561-8a48-731746910551-installation-pull-secrets\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.799565 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhrqn\" (UniqueName: \"kubernetes.io/projected/c46e6576-9c19-4782-9466-baeb95106d1f-kube-api-access-hhrqn\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.799887 5045 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.299871351 +0000 UTC m=+151.657530463 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.814734 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.833771 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.854633 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.873659 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.893802 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.901091 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.901263 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.401226691 +0000 UTC m=+151.758885843 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.901397 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.901488 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-ca\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.901560 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-service-ca\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.901601 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-node-bootstrap-token\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.901643 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-bound-sa-token\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.901677 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/063c151b-ad3f-4ce3-b076-0b6b335634d0-cert\") pod \"ingress-canary-z5ldf\" (UID: \"063c151b-ad3f-4ce3-b076-0b6b335634d0\") " pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:34 crc kubenswrapper[5045]: E1125 23:01:34.902202 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.40218207 +0000 UTC m=+151.759841212 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.902460 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c46e6576-9c19-4782-9466-baeb95106d1f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.903116 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2ffc\" (UniqueName: \"kubernetes.io/projected/114e3666-4983-493e-96d9-25bc57d7a849-kube-api-access-g2ffc\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.903598 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-trusted-ca\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.905618 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-trusted-ca\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.905781 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmztf\" (UniqueName: \"kubernetes.io/projected/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-kube-api-access-tmztf\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.905901 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c768642d-19e9-4c69-af8b-9758b06fd298-metrics-tls\") pod \"dns-operator-744455d44c-nlsrs\" (UID: \"c768642d-19e9-4c69-af8b-9758b06fd298\") " pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.905975 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd77ca91-d866-4735-abdd-5309f8ab6709-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906020 5045 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-csi-data-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906283 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd77ca91-d866-4735-abdd-5309f8ab6709-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906444 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/049f814f-4648-411b-b692-bf19c3066c8a-config\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906496 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/defb4b32-105c-4e11-8d80-1b482fd18f4c-service-ca-bundle\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906563 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h5b9\" (UniqueName: \"kubernetes.io/projected/c768642d-19e9-4c69-af8b-9758b06fd298-kube-api-access-2h5b9\") pod \"dns-operator-744455d44c-nlsrs\" (UID: \"c768642d-19e9-4c69-af8b-9758b06fd298\") " pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906625 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/96f7b7cd-943f-4725-8cdb-9b411455cf64-images\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906693 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfz47\" (UniqueName: \"kubernetes.io/projected/4288fa59-0a2c-4041-a659-34c7956b0685-kube-api-access-tfz47\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906782 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcfcx\" (UniqueName: \"kubernetes.io/projected/dad5d45e-7679-4473-890e-d974d55f4b94-kube-api-access-lcfcx\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906832 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-webhook-cert\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906887 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e953287-8cf8-4561-8a48-731746910551-ca-trust-extracted\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.906945 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/114e3666-4983-493e-96d9-25bc57d7a849-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907031 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907080 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/049f814f-4648-411b-b692-bf19c3066c8a-auth-proxy-config\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907164 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-config\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907216 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52r6g\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-kube-api-access-52r6g\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907248 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c46e6576-9c19-4782-9466-baeb95106d1f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907262 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-registry-certificates\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907567 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e953287-8cf8-4561-8a48-731746910551-ca-trust-extracted\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907668 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-profile-collector-cert\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907768 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4288fa59-0a2c-4041-a659-34c7956b0685-metrics-tls\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907886 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffzc7\" (UniqueName: \"kubernetes.io/projected/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-kube-api-access-ffzc7\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.907934 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/96f7b7cd-943f-4725-8cdb-9b411455cf64-images\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.908057 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/049f814f-4648-411b-b692-bf19c3066c8a-machine-approver-tls\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.908293 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvvwv\" (UniqueName: \"kubernetes.io/projected/31904391-5f4c-428d-b67c-26fccc5070df-kube-api-access-kvvwv\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.908478 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-default-certificate\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.908946 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r42gd\" (UniqueName: \"kubernetes.io/projected/063c151b-ad3f-4ce3-b076-0b6b335634d0-kube-api-access-r42gd\") pod \"ingress-canary-z5ldf\" (UID: \"063c151b-ad3f-4ce3-b076-0b6b335634d0\") " pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909063 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-serving-cert\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909141 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slbk2\" (UniqueName: \"kubernetes.io/projected/b250dfbe-fa9f-430c-ac18-9f6625fac525-kube-api-access-slbk2\") pod \"package-server-manager-789f6589d5-fs58x\" (UID: \"b250dfbe-fa9f-430c-ac18-9f6625fac525\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909204 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/31904391-5f4c-428d-b67c-26fccc5070df-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909266 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-plugins-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909332 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e466426d-ad8f-46ce-813b-b0276253e555-secret-volume\") pod \"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909383 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-registration-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909417 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e466426d-ad8f-46ce-813b-b0276253e555-config-volume\") pod 
\"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909496 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhrqn\" (UniqueName: \"kubernetes.io/projected/c46e6576-9c19-4782-9466-baeb95106d1f-kube-api-access-hhrqn\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909538 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-mountpoint-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909598 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909631 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-config\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909663 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/114e3666-4983-493e-96d9-25bc57d7a849-metrics-tls\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909771 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llxns\" (UniqueName: \"kubernetes.io/projected/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-kube-api-access-llxns\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909810 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1298d867-c4e4-48a2-b316-72aab5c1cfa4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909856 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/7e777e65-538b-4823-abd9-f6c387f3fba3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-955fw\" (UID: \"7e777e65-538b-4823-abd9-f6c387f3fba3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909907 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-srv-cert\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909941 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/dad5d45e-7679-4473-890e-d974d55f4b94-signing-cabundle\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.909971 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1c79015c-ed6e-4e37-849b-6cd707d832f7-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v9nc5\" (UID: \"1c79015c-ed6e-4e37-849b-6cd707d832f7\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910013 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd77ca91-d866-4735-abdd-5309f8ab6709-config\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910045 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76gm6\" (UniqueName: \"kubernetes.io/projected/7e777e65-538b-4823-abd9-f6c387f3fba3-kube-api-access-76gm6\") pod \"control-plane-machine-set-operator-78cbb6b69f-955fw\" (UID: \"7e777e65-538b-4823-abd9-f6c387f3fba3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910088 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-socket-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910150 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lngh5\" (UniqueName: \"kubernetes.io/projected/1c79015c-ed6e-4e37-849b-6cd707d832f7-kube-api-access-lngh5\") pod \"multus-admission-controller-857f4d67dd-v9nc5\" (UID: \"1c79015c-ed6e-4e37-849b-6cd707d832f7\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910338 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/c46e6576-9c19-4782-9466-baeb95106d1f-proxy-tls\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910354 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-registry-certificates\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910399 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmvlt\" (UniqueName: \"kubernetes.io/projected/e4cff035-51e5-443d-9bb7-0b6ac6505e20-kube-api-access-rmvlt\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910459 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h8kc\" (UniqueName: \"kubernetes.io/projected/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-kube-api-access-4h8kc\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910505 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1298d867-c4e4-48a2-b316-72aab5c1cfa4-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910560 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/96f7b7cd-943f-4725-8cdb-9b411455cf64-proxy-tls\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910641 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xp6r\" (UniqueName: \"kubernetes.io/projected/e466426d-ad8f-46ce-813b-b0276253e555-kube-api-access-5xp6r\") pod \"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910705 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/96f7b7cd-943f-4725-8cdb-9b411455cf64-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910797 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" 
(UniqueName: \"kubernetes.io/configmap/4288fa59-0a2c-4041-a659-34c7956b0685-config-volume\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910840 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-tmpfs\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910874 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62gkr\" (UniqueName: \"kubernetes.io/projected/a7129135-79a3-478d-9ae4-78f7fe46280f-kube-api-access-62gkr\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910935 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/31904391-5f4c-428d-b67c-26fccc5070df-srv-cert\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.910965 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/dad5d45e-7679-4473-890e-d974d55f4b94-signing-key\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911181 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvrsn\" (UniqueName: \"kubernetes.io/projected/96f7b7cd-943f-4725-8cdb-9b411455cf64-kube-api-access-bvrsn\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911245 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-metrics-certs\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911333 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-client\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911378 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b250dfbe-fa9f-430c-ac18-9f6625fac525-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fs58x\" 
(UID: \"b250dfbe-fa9f-430c-ac18-9f6625fac525\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911418 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-registry-tls\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911452 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/114e3666-4983-493e-96d9-25bc57d7a849-trusted-ca\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911487 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911543 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-apiservice-cert\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911742 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4cff035-51e5-443d-9bb7-0b6ac6505e20-serving-cert\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911833 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-stats-auth\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911872 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgb4h\" (UniqueName: \"kubernetes.io/projected/471a4a28-2882-4e5f-a47a-695be9853e3d-kube-api-access-wgb4h\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911942 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hht46\" (UniqueName: \"kubernetes.io/projected/049f814f-4648-411b-b692-bf19c3066c8a-kube-api-access-hht46\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911941 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/96f7b7cd-943f-4725-8cdb-9b411455cf64-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.911996 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk9hs\" (UniqueName: \"kubernetes.io/projected/defb4b32-105c-4e11-8d80-1b482fd18f4c-kube-api-access-qk9hs\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.912102 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-certs\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.912176 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e953287-8cf8-4561-8a48-731746910551-installation-pull-secrets\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.912211 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pdqb\" (UniqueName: \"kubernetes.io/projected/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-kube-api-access-4pdqb\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.912245 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1298d867-c4e4-48a2-b316-72aab5c1cfa4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.912367 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.912402 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9kgm\" (UniqueName: \"kubernetes.io/projected/5c90ea55-e258-4fcf-9f33-cbca6398d215-kube-api-access-z9kgm\") pod \"migrator-59844c95c7-t75fl\" (UID: \"5c90ea55-e258-4fcf-9f33-cbca6398d215\") " 
pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.914001 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.915464 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/96f7b7cd-943f-4725-8cdb-9b411455cf64-proxy-tls\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.915992 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e953287-8cf8-4561-8a48-731746910551-installation-pull-secrets\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.916068 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c46e6576-9c19-4782-9466-baeb95106d1f-proxy-tls\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.918060 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-registry-tls\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.935193 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.954578 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.990111 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 23:01:34 crc kubenswrapper[5045]: I1125 23:01:34.993316 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013350 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.013530 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.513497776 +0000 UTC m=+151.871156918 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013591 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013640 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9kgm\" (UniqueName: \"kubernetes.io/projected/5c90ea55-e258-4fcf-9f33-cbca6398d215-kube-api-access-z9kgm\") pod \"migrator-59844c95c7-t75fl\" (UID: \"5c90ea55-e258-4fcf-9f33-cbca6398d215\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013681 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013774 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-ca\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013809 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-service-ca\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013839 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-node-bootstrap-token\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013881 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/063c151b-ad3f-4ce3-b076-0b6b335634d0-cert\") pod \"ingress-canary-z5ldf\" (UID: \"063c151b-ad3f-4ce3-b076-0b6b335634d0\") " pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013933 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2ffc\" (UniqueName: 
\"kubernetes.io/projected/114e3666-4983-493e-96d9-25bc57d7a849-kube-api-access-g2ffc\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.013977 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmztf\" (UniqueName: \"kubernetes.io/projected/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-kube-api-access-tmztf\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014221 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c768642d-19e9-4c69-af8b-9758b06fd298-metrics-tls\") pod \"dns-operator-744455d44c-nlsrs\" (UID: \"c768642d-19e9-4c69-af8b-9758b06fd298\") " pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014374 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd77ca91-d866-4735-abdd-5309f8ab6709-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014409 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-csi-data-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014445 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd77ca91-d866-4735-abdd-5309f8ab6709-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014477 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/049f814f-4648-411b-b692-bf19c3066c8a-config\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014508 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/defb4b32-105c-4e11-8d80-1b482fd18f4c-service-ca-bundle\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014540 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h5b9\" (UniqueName: \"kubernetes.io/projected/c768642d-19e9-4c69-af8b-9758b06fd298-kube-api-access-2h5b9\") pod \"dns-operator-744455d44c-nlsrs\" (UID: \"c768642d-19e9-4c69-af8b-9758b06fd298\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014573 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfz47\" (UniqueName: \"kubernetes.io/projected/4288fa59-0a2c-4041-a659-34c7956b0685-kube-api-access-tfz47\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014607 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcfcx\" (UniqueName: \"kubernetes.io/projected/dad5d45e-7679-4473-890e-d974d55f4b94-kube-api-access-lcfcx\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014637 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-webhook-cert\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014668 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/114e3666-4983-493e-96d9-25bc57d7a849-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014675 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-csi-data-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014703 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014858 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/049f814f-4648-411b-b692-bf19c3066c8a-auth-proxy-config\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.014951 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-config\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015031 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-profile-collector-cert\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015086 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4288fa59-0a2c-4041-a659-34c7956b0685-metrics-tls\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.015113 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.515088824 +0000 UTC m=+151.872747976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015154 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffzc7\" (UniqueName: \"kubernetes.io/projected/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-kube-api-access-ffzc7\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015237 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/049f814f-4648-411b-b692-bf19c3066c8a-machine-approver-tls\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015359 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvvwv\" (UniqueName: \"kubernetes.io/projected/31904391-5f4c-428d-b67c-26fccc5070df-kube-api-access-kvvwv\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015415 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-ca\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015419 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-default-certificate\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " 
pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015510 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015513 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r42gd\" (UniqueName: \"kubernetes.io/projected/063c151b-ad3f-4ce3-b076-0b6b335634d0-kube-api-access-r42gd\") pod \"ingress-canary-z5ldf\" (UID: \"063c151b-ad3f-4ce3-b076-0b6b335634d0\") " pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015543 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-service-ca\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015611 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-serving-cert\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015675 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slbk2\" (UniqueName: \"kubernetes.io/projected/b250dfbe-fa9f-430c-ac18-9f6625fac525-kube-api-access-slbk2\") pod \"package-server-manager-789f6589d5-fs58x\" (UID: \"b250dfbe-fa9f-430c-ac18-9f6625fac525\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015769 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/31904391-5f4c-428d-b67c-26fccc5070df-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015832 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-plugins-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015887 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e466426d-ad8f-46ce-813b-b0276253e555-secret-volume\") pod \"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.015973 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4cff035-51e5-443d-9bb7-0b6ac6505e20-config\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc 
kubenswrapper[5045]: I1125 23:01:35.016019 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-registration-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016071 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e466426d-ad8f-46ce-813b-b0276253e555-config-volume\") pod \"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016154 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-mountpoint-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016237 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016288 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-config\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016292 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-plugins-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016341 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/114e3666-4983-493e-96d9-25bc57d7a849-metrics-tls\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016354 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-mountpoint-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016342 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-registration-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: 
\"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016410 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llxns\" (UniqueName: \"kubernetes.io/projected/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-kube-api-access-llxns\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016526 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1298d867-c4e4-48a2-b316-72aab5c1cfa4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016745 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/7e777e65-538b-4823-abd9-f6c387f3fba3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-955fw\" (UID: \"7e777e65-538b-4823-abd9-f6c387f3fba3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016834 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-srv-cert\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016892 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/dad5d45e-7679-4473-890e-d974d55f4b94-signing-cabundle\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.016948 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1c79015c-ed6e-4e37-849b-6cd707d832f7-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v9nc5\" (UID: \"1c79015c-ed6e-4e37-849b-6cd707d832f7\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017019 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd77ca91-d866-4735-abdd-5309f8ab6709-config\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017071 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76gm6\" (UniqueName: \"kubernetes.io/projected/7e777e65-538b-4823-abd9-f6c387f3fba3-kube-api-access-76gm6\") pod \"control-plane-machine-set-operator-78cbb6b69f-955fw\" (UID: \"7e777e65-538b-4823-abd9-f6c387f3fba3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" Nov 
25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017136 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-socket-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017190 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lngh5\" (UniqueName: \"kubernetes.io/projected/1c79015c-ed6e-4e37-849b-6cd707d832f7-kube-api-access-lngh5\") pod \"multus-admission-controller-857f4d67dd-v9nc5\" (UID: \"1c79015c-ed6e-4e37-849b-6cd707d832f7\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017254 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmvlt\" (UniqueName: \"kubernetes.io/projected/e4cff035-51e5-443d-9bb7-0b6ac6505e20-kube-api-access-rmvlt\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017277 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-socket-dir\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017307 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h8kc\" (UniqueName: \"kubernetes.io/projected/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-kube-api-access-4h8kc\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017358 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1298d867-c4e4-48a2-b316-72aab5c1cfa4-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017443 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xp6r\" (UniqueName: \"kubernetes.io/projected/e466426d-ad8f-46ce-813b-b0276253e555-kube-api-access-5xp6r\") pod \"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017501 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4288fa59-0a2c-4041-a659-34c7956b0685-config-volume\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017550 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-tmpfs\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017604 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62gkr\" (UniqueName: \"kubernetes.io/projected/a7129135-79a3-478d-9ae4-78f7fe46280f-kube-api-access-62gkr\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017685 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/31904391-5f4c-428d-b67c-26fccc5070df-srv-cert\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017770 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/dad5d45e-7679-4473-890e-d974d55f4b94-signing-key\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017873 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-metrics-certs\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017928 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-client\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.017592 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018084 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b250dfbe-fa9f-430c-ac18-9f6625fac525-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fs58x\" (UID: \"b250dfbe-fa9f-430c-ac18-9f6625fac525\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018128 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/114e3666-4983-493e-96d9-25bc57d7a849-trusted-ca\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018166 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018183 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-tmpfs\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018212 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-apiservice-cert\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018257 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4cff035-51e5-443d-9bb7-0b6ac6505e20-serving-cert\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018313 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-stats-auth\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018359 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgb4h\" (UniqueName: \"kubernetes.io/projected/471a4a28-2882-4e5f-a47a-695be9853e3d-kube-api-access-wgb4h\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018385 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hht46\" (UniqueName: \"kubernetes.io/projected/049f814f-4648-411b-b692-bf19c3066c8a-kube-api-access-hht46\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018418 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk9hs\" (UniqueName: \"kubernetes.io/projected/defb4b32-105c-4e11-8d80-1b482fd18f4c-kube-api-access-qk9hs\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018443 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: 
\"kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-certs\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018480 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pdqb\" (UniqueName: \"kubernetes.io/projected/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-kube-api-access-4pdqb\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.018499 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1298d867-c4e4-48a2-b316-72aab5c1cfa4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.019748 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/114e3666-4983-493e-96d9-25bc57d7a849-trusted-ca\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.020593 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/114e3666-4983-493e-96d9-25bc57d7a849-metrics-tls\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.023697 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.024219 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4cff035-51e5-443d-9bb7-0b6ac6505e20-serving-cert\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.024661 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e4cff035-51e5-443d-9bb7-0b6ac6505e20-etcd-client\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.034802 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.049548 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/bd77ca91-d866-4735-abdd-5309f8ab6709-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.055056 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.074476 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.079454 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd77ca91-d866-4735-abdd-5309f8ab6709-config\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.095193 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.115536 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.119688 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.119942 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.619910951 +0000 UTC m=+151.977570103 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.120659 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.121152 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.621134978 +0000 UTC m=+151.978794130 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.134035 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.142595 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/dad5d45e-7679-4473-890e-d974d55f4b94-signing-key\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.154227 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.158197 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/dad5d45e-7679-4473-890e-d974d55f4b94-signing-cabundle\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.174257 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.193815 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.214487 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.222598 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.223008 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.722978083 +0000 UTC m=+152.080637225 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.234695 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.240634 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/049f814f-4648-411b-b692-bf19c3066c8a-machine-approver-tls\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.254545 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.256608 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/049f814f-4648-411b-b692-bf19c3066c8a-auth-proxy-config\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.274373 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.276121 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/049f814f-4648-411b-b692-bf19c3066c8a-config\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.294145 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.314220 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.326250 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.326770 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.826745647 +0000 UTC m=+152.184404799 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.334560 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.353922 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.364111 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1298d867-c4e4-48a2-b316-72aab5c1cfa4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.374070 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.377833 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1298d867-c4e4-48a2-b316-72aab5c1cfa4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.394981 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.402677 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/7e777e65-538b-4823-abd9-f6c387f3fba3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-955fw\" (UID: \"7e777e65-538b-4823-abd9-f6c387f3fba3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.413828 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.417454 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.428341 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.428566 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.92853852 +0000 UTC m=+152.286197672 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.429449 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.430027 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:35.930007515 +0000 UTC m=+152.287666667 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.434442 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.454089 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.460698 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.474428 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.494800 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.512777 5045 request.go:700] Waited for 1.004175138s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0 Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.514705 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.532345 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.532791 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.032751417 +0000 UTC m=+152.390410579 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.533964 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.534309 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.034294824 +0000 UTC m=+152.391953946 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.534586 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.554493 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.557742 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e466426d-ad8f-46ce-813b-b0276253e555-config-volume\") pod \"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.574573 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.581275 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-srv-cert\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.594048 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.596734 5045 configmap.go:193] Couldn't get configMap openshift-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:35 crc 
kubenswrapper[5045]: E1125 23:01:35.596877 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca podName:c9443fc5-a284-4838-a107-6146af9d6bba nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.096848624 +0000 UTC m=+152.454507766 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca") pod "controller-manager-879f6c89f-mnm5x" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba") : failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.599953 5045 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.600013 5045 secret.go:188] Couldn't get secret openshift-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.600050 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config podName:0079ce5c-97a3-43a2-8b93-df87ee4de76b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.100027281 +0000 UTC m=+152.457686573 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config") pod "openshift-apiserver-operator-796bbdcf4f-x4djv" (UID: "0079ce5c-97a3-43a2-8b93-df87ee4de76b") : failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.600083 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert podName:c9443fc5-a284-4838-a107-6146af9d6bba nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.100064042 +0000 UTC m=+152.457723184 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert") pod "controller-manager-879f6c89f-mnm5x" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.601770 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-profile-collector-cert\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.602117 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e466426d-ad8f-46ce-813b-b0276253e555-secret-volume\") pod \"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.602143 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/31904391-5f4c-428d-b67c-26fccc5070df-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.604950 5045 configmap.go:193] Couldn't get configMap openshift-controller-manager/config: failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.605032 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config podName:c9443fc5-a284-4838-a107-6146af9d6bba nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.105014314 +0000 UTC m=+152.462673466 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config") pod "controller-manager-879f6c89f-mnm5x" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba") : failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.614401 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.634634 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.634919 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.635090 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 23:01:36.135053436 +0000 UTC m=+152.492712578 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.635931 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.636489 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.136467629 +0000 UTC m=+152.494126781 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.654456 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.674656 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.677064 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/defb4b32-105c-4e11-8d80-1b482fd18f4c-service-ca-bundle\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.694999 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.713785 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.725399 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-stats-auth\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.734341 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 23:01:35 crc 
kubenswrapper[5045]: I1125 23:01:35.737754 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.737895 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.23786465 +0000 UTC m=+152.595523802 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.738701 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.739302 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.239279213 +0000 UTC m=+152.596938356 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.746644 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-metrics-certs\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.754136 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.763358 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/defb4b32-105c-4e11-8d80-1b482fd18f4c-default-certificate\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.774167 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.794565 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.815803 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.824121 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/31904391-5f4c-428d-b67c-26fccc5070df-srv-cert\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.836045 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.839565 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.839759 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.339700345 +0000 UTC m=+152.697359497 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.840766 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.841212 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.34118906 +0000 UTC m=+152.698848212 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.844907 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b250dfbe-fa9f-430c-ac18-9f6625fac525-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fs58x\" (UID: \"b250dfbe-fa9f-430c-ac18-9f6625fac525\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.854962 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.874108 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.894022 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.899402 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c768642d-19e9-4c69-af8b-9758b06fd298-metrics-tls\") pod \"dns-operator-744455d44c-nlsrs\" (UID: \"c768642d-19e9-4c69-af8b-9758b06fd298\") " pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.915474 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.934176 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.941685 5045 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.942029 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.442000133 +0000 UTC m=+152.799659275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.942938 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:35 crc kubenswrapper[5045]: E1125 23:01:35.943459 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.443436327 +0000 UTC m=+152.801095489 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.954103 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.962600 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1c79015c-ed6e-4e37-849b-6cd707d832f7-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v9nc5\" (UID: \"1c79015c-ed6e-4e37-849b-6cd707d832f7\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.973995 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 23:01:35 crc kubenswrapper[5045]: I1125 23:01:35.994551 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.014166 5045 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.014252 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.014275 5045 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.014265 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-node-bootstrap-token podName:471a4a28-2882-4e5f-a47a-695be9853e3d nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.51423779 +0000 UTC m=+152.871896942 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-node-bootstrap-token") pod "machine-config-server-dgl6n" (UID: "471a4a28-2882-4e5f-a47a-695be9853e3d") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.014385 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/063c151b-ad3f-4ce3-b076-0b6b335634d0-cert podName:063c151b-ad3f-4ce3-b076-0b6b335634d0 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.514357083 +0000 UTC m=+152.872016425 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/063c151b-ad3f-4ce3-b076-0b6b335634d0-cert") pod "ingress-canary-z5ldf" (UID: "063c151b-ad3f-4ce3-b076-0b6b335634d0") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.016315 5045 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.016429 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4288fa59-0a2c-4041-a659-34c7956b0685-metrics-tls podName:4288fa59-0a2c-4041-a659-34c7956b0685 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.516398916 +0000 UTC m=+152.874058098 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/4288fa59-0a2c-4041-a659-34c7956b0685-metrics-tls") pod "dns-default-hbpfm" (UID: "4288fa59-0a2c-4041-a659-34c7956b0685") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.016482 5045 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.016542 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-serving-cert podName:6fa3c1cc-bb4c-4b8d-a6a2-947b50943567 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.51652445 +0000 UTC m=+152.874183852 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-serving-cert") pod "service-ca-operator-777779d784-xz98p" (UID: "6fa3c1cc-bb4c-4b8d-a6a2-947b50943567") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.016832 5045 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.016920 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-webhook-cert podName:eeaa9de0-e8a8-4612-a1ec-98a5af5589b1 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.516900581 +0000 UTC m=+152.874559733 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-webhook-cert") pod "packageserver-d55dfcdfc-52v8s" (UID: "eeaa9de0-e8a8-4612-a1ec-98a5af5589b1") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.017115 5045 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.017205 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-config podName:6fa3c1cc-bb4c-4b8d-a6a2-947b50943567 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.51718459 +0000 UTC m=+152.874843742 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-config") pod "service-ca-operator-777779d784-xz98p" (UID: "6fa3c1cc-bb4c-4b8d-a6a2-947b50943567") : failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.018530 5045 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.018646 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4288fa59-0a2c-4041-a659-34c7956b0685-config-volume podName:4288fa59-0a2c-4041-a659-34c7956b0685 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.518627564 +0000 UTC m=+152.876286706 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/4288fa59-0a2c-4041-a659-34c7956b0685-config-volume") pod "dns-default-hbpfm" (UID: "4288fa59-0a2c-4041-a659-34c7956b0685") : failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.019654 5045 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.019754 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-apiservice-cert podName:eeaa9de0-e8a8-4612-a1ec-98a5af5589b1 nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.519736578 +0000 UTC m=+152.877395720 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-apiservice-cert") pod "packageserver-d55dfcdfc-52v8s" (UID: "eeaa9de0-e8a8-4612-a1ec-98a5af5589b1") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.020883 5045 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.021027 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-certs podName:471a4a28-2882-4e5f-a47a-695be9853e3d nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.520991497 +0000 UTC m=+152.878650639 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-certs") pod "machine-config-server-dgl6n" (UID: "471a4a28-2882-4e5f-a47a-695be9853e3d") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.035144 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.043784 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.043975 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.543945741 +0000 UTC m=+152.901604893 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.044907 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.045515 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.545486198 +0000 UTC m=+152.903145410 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.053497 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.073557 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.094122 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.113866 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.133690 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.147053 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.147339 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.647292722 +0000 UTC m=+153.004951874 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.147641 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.147954 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.148272 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.148705 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.148812 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.648750766 +0000 UTC m=+153.006409918 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.149106 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.155514 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.174801 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.195390 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.214593 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.234805 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.250780 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.250948 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.750917741 +0000 UTC m=+153.108576893 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.251109 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.251592 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.751571131 +0000 UTC m=+153.109230283 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.254062 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.274264 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.295055 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.314709 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.334943 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.352812 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.352978 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.852938221 +0000 UTC m=+153.210597373 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.353932 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.354249 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.354515 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.854493249 +0000 UTC m=+153.212152401 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.374686 5045 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.393831 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.441553 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2v69\" (UniqueName: \"kubernetes.io/projected/299d494f-f519-4793-b234-6fa6174e9428-kube-api-access-z2v69\") pod \"openshift-config-operator-7777fb866f-zzgj8\" (UID: \"299d494f-f519-4793-b234-6fa6174e9428\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.455092 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.455309 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.955281741 +0000 UTC m=+153.312940893 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.455809 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.456371 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:36.956346394 +0000 UTC m=+153.314005546 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.481837 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9w77\" (UniqueName: \"kubernetes.io/projected/4e266b78-e9fa-40bf-844b-7d5e273e988b-kube-api-access-w9w77\") pod \"route-controller-manager-6576b87f9c-h6sq9\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.500877 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/148a8bcd-f16e-4a15-89a2-61a79074bdf4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.506743 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.520906 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvctf\" (UniqueName: \"kubernetes.io/projected/601df050-5421-4266-bf7c-60096a066a24-kube-api-access-dvctf\") pod \"machine-api-operator-5694c8668f-tst4h\" (UID: \"601df050-5421-4266-bf7c-60096a066a24\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.532633 5045 request.go:700] Waited for 1.934594845s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication-operator/serviceaccounts/authentication-operator/token Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.539216 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.542398 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-485bw\" (UniqueName: \"kubernetes.io/projected/b81392a7-2366-421d-834c-d72869a34014-kube-api-access-485bw\") pod \"cluster-samples-operator-665b6dd947-fgz2j\" (UID: \"b81392a7-2366-421d-834c-d72869a34014\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.554196 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.558193 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.558432 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.058377424 +0000 UTC m=+153.416036596 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.558549 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-webhook-cert\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.558603 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4288fa59-0a2c-4041-a659-34c7956b0685-metrics-tls\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.558663 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-serving-cert\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.558731 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-config\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.558906 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4288fa59-0a2c-4041-a659-34c7956b0685-config-volume\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.558983 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-apiservice-cert\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.559060 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-certs\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.559110 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.559138 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-node-bootstrap-token\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.559167 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/063c151b-ad3f-4ce3-b076-0b6b335634d0-cert\") pod \"ingress-canary-z5ldf\" (UID: \"063c151b-ad3f-4ce3-b076-0b6b335634d0\") " pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.563115 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-serving-cert\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.563321 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-config\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.563602 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.063574524 +0000 UTC m=+153.421233736 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.565382 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4288fa59-0a2c-4041-a659-34c7956b0685-config-volume\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.565651 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/063c151b-ad3f-4ce3-b076-0b6b335634d0-cert\") pod \"ingress-canary-z5ldf\" (UID: \"063c151b-ad3f-4ce3-b076-0b6b335634d0\") " pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.566229 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpcw5\" (UniqueName: \"kubernetes.io/projected/cc7739f6-7c08-44a8-aad4-4ec37a9016f2-kube-api-access-qpcw5\") pod \"authentication-operator-69f744f599-q6hcb\" (UID: \"cc7739f6-7c08-44a8-aad4-4ec37a9016f2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.566325 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-apiservice-cert\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.567151 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-webhook-cert\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.567527 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4288fa59-0a2c-4041-a659-34c7956b0685-metrics-tls\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.569674 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-node-bootstrap-token\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.570546 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/471a4a28-2882-4e5f-a47a-695be9853e3d-certs\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " 
pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.577445 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7d8q\" (UniqueName: \"kubernetes.io/projected/0079ce5c-97a3-43a2-8b93-df87ee4de76b-kube-api-access-l7d8q\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.591999 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrsmg\" (UniqueName: \"kubernetes.io/projected/148a8bcd-f16e-4a15-89a2-61a79074bdf4-kube-api-access-zrsmg\") pod \"cluster-image-registry-operator-dc59b4c8b-drbtr\" (UID: \"148a8bcd-f16e-4a15-89a2-61a79074bdf4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.601157 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.613214 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njf66\" (UniqueName: \"kubernetes.io/projected/859fc901-fe58-44e6-b133-9da8193dd02f-kube-api-access-njf66\") pod \"console-operator-58897d9998-4rwm4\" (UID: \"859fc901-fe58-44e6-b133-9da8193dd02f\") " pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.634677 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwfj4\" (UniqueName: \"kubernetes.io/projected/0ec8737e-20e0-4f51-b134-a60be096e1df-kube-api-access-xwfj4\") pod \"apiserver-76f77b778f-dlljd\" (UID: \"0ec8737e-20e0-4f51-b134-a60be096e1df\") " pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.637524 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.660106 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.660341 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.16026371 +0000 UTC m=+153.517922842 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.661066 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.661637 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.161607282 +0000 UTC m=+153.519266444 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.668665 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qw6j6\" (UID: \"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.681172 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk57t\" (UniqueName: \"kubernetes.io/projected/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-kube-api-access-fk57t\") pod \"console-f9d7485db-gmw7c\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.701867 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqwlr\" (UniqueName: \"kubernetes.io/projected/50babc27-1757-4498-abd8-0fd5a1033b80-kube-api-access-dqwlr\") pod \"apiserver-7bbb656c7d-whfmb\" (UID: \"50babc27-1757-4498-abd8-0fd5a1033b80\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.708973 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2rd4\" (UniqueName: \"kubernetes.io/projected/e59f0f14-0c21-43c9-baaa-bf860aaa16b3-kube-api-access-g2rd4\") pod \"downloads-7954f5f757-ws5pv\" (UID: \"e59f0f14-0c21-43c9-baaa-bf860aaa16b3\") " pod="openshift-console/downloads-7954f5f757-ws5pv" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.732138 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldfmv\" (UniqueName: 
\"kubernetes.io/projected/f848dbf6-817b-44d7-b410-7ac266166501-kube-api-access-ldfmv\") pod \"oauth-openshift-558db77b4-2btnr\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.737549 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.757392 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh84h\" (UniqueName: \"kubernetes.io/projected/3c6e0f76-e954-4f89-a2c8-fccbe4440171-kube-api-access-hh84h\") pod \"openshift-controller-manager-operator-756b6f6bc6-85vcd\" (UID: \"3c6e0f76-e954-4f89-a2c8-fccbe4440171\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.763068 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.763848 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.263823588 +0000 UTC m=+153.621482720 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.784129 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.785248 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9"] Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.786699 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-bound-sa-token\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.797236 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.807876 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52r6g\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-kube-api-access-52r6g\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.832240 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhrqn\" (UniqueName: \"kubernetes.io/projected/c46e6576-9c19-4782-9466-baeb95106d1f-kube-api-access-hhrqn\") pod \"machine-config-controller-84d6567774-j7nnq\" (UID: \"c46e6576-9c19-4782-9466-baeb95106d1f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.864355 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvrsn\" (UniqueName: \"kubernetes.io/projected/96f7b7cd-943f-4725-8cdb-9b411455cf64-kube-api-access-bvrsn\") pod \"machine-config-operator-74547568cd-rpltr\" (UID: \"96f7b7cd-943f-4725-8cdb-9b411455cf64\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.867035 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.867640 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.367618002 +0000 UTC m=+153.725277114 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.877344 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9kgm\" (UniqueName: \"kubernetes.io/projected/5c90ea55-e258-4fcf-9f33-cbca6398d215-kube-api-access-z9kgm\") pod \"migrator-59844c95c7-t75fl\" (UID: \"5c90ea55-e258-4fcf-9f33-cbca6398d215\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.892948 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.895764 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2ffc\" (UniqueName: \"kubernetes.io/projected/114e3666-4983-493e-96d9-25bc57d7a849-kube-api-access-g2ffc\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.914489 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.916829 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmztf\" (UniqueName: \"kubernetes.io/projected/eeaa9de0-e8a8-4612-a1ec-98a5af5589b1-kube-api-access-tmztf\") pod \"packageserver-d55dfcdfc-52v8s\" (UID: \"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.926048 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.935184 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.940097 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd77ca91-d866-4735-abdd-5309f8ab6709-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-7q4w2\" (UID: \"bd77ca91-d866-4735-abdd-5309f8ab6709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.944948 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.951096 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr"] Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.960235 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.960887 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcfcx\" (UniqueName: \"kubernetes.io/projected/dad5d45e-7679-4473-890e-d974d55f4b94-kube-api-access-lcfcx\") pod \"service-ca-9c57cc56f-ml4r4\" (UID: \"dad5d45e-7679-4473-890e-d974d55f4b94\") " pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.967581 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:36 crc kubenswrapper[5045]: E1125 23:01:36.968287 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.46826378 +0000 UTC m=+153.825922892 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.968330 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q6hcb"] Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.969761 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfz47\" (UniqueName: \"kubernetes.io/projected/4288fa59-0a2c-4041-a659-34c7956b0685-kube-api-access-tfz47\") pod \"dns-default-hbpfm\" (UID: \"4288fa59-0a2c-4041-a659-34c7956b0685\") " pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.975996 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-ws5pv" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.983620 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.992656 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" Nov 25 23:01:36 crc kubenswrapper[5045]: I1125 23:01:36.994901 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/114e3666-4983-493e-96d9-25bc57d7a849-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nzmnq\" (UID: \"114e3666-4983-493e-96d9-25bc57d7a849\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.010976 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6"] Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.014963 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h5b9\" (UniqueName: \"kubernetes.io/projected/c768642d-19e9-4c69-af8b-9758b06fd298-kube-api-access-2h5b9\") pod \"dns-operator-744455d44c-nlsrs\" (UID: \"c768642d-19e9-4c69-af8b-9758b06fd298\") " pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.024428 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.035805 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.040414 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffzc7\" (UniqueName: \"kubernetes.io/projected/b08d30b9-014a-4dee-b584-9fa0a2e6e4d8-kube-api-access-ffzc7\") pod \"kube-storage-version-migrator-operator-b67b599dd-69wcg\" (UID: \"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.058422 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.064492 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvvwv\" (UniqueName: \"kubernetes.io/projected/31904391-5f4c-428d-b67c-26fccc5070df-kube-api-access-kvvwv\") pod \"olm-operator-6b444d44fb-ldz47\" (UID: \"31904391-5f4c-428d-b67c-26fccc5070df\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.064723 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.070402 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.070860 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.570845027 +0000 UTC m=+153.928504139 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.081109 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j"] Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.091546 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.096795 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r42gd\" (UniqueName: \"kubernetes.io/projected/063c151b-ad3f-4ce3-b076-0b6b335634d0-kube-api-access-r42gd\") pod \"ingress-canary-z5ldf\" (UID: \"063c151b-ad3f-4ce3-b076-0b6b335634d0\") " pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.106778 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dlljd"] Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.108526 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tst4h"] Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.108785 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8"] Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.113902 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slbk2\" (UniqueName: \"kubernetes.io/projected/b250dfbe-fa9f-430c-ac18-9f6625fac525-kube-api-access-slbk2\") pod \"package-server-manager-789f6589d5-fs58x\" (UID: \"b250dfbe-fa9f-430c-ac18-9f6625fac525\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.116102 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llxns\" (UniqueName: \"kubernetes.io/projected/7ce2064c-bf8f-45e8-a5bc-4b19774a54fd-kube-api-access-llxns\") pod \"csi-hostpathplugin-wxk7s\" (UID: \"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd\") " 
pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.128885 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76gm6\" (UniqueName: \"kubernetes.io/projected/7e777e65-538b-4823-abd9-f6c387f3fba3-kube-api-access-76gm6\") pod \"control-plane-machine-set-operator-78cbb6b69f-955fw\" (UID: \"7e777e65-538b-4823-abd9-f6c387f3fba3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.146562 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.149795 5045 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.149870 5045 configmap.go:193] Couldn't get configMap openshift-controller-manager/config: failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.149917 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config podName:0079ce5c-97a3-43a2-8b93-df87ee4de76b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.149890673 +0000 UTC m=+154.507549775 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config") pod "openshift-apiserver-operator-796bbdcf4f-x4djv" (UID: "0079ce5c-97a3-43a2-8b93-df87ee4de76b") : failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.149922 5045 configmap.go:193] Couldn't get configMap openshift-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.149941 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config podName:c9443fc5-a284-4838-a107-6146af9d6bba nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.149932644 +0000 UTC m=+154.507591756 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config") pod "controller-manager-879f6c89f-mnm5x" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba") : failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.149812 5045 secret.go:188] Couldn't get secret openshift-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.149977 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca podName:c9443fc5-a284-4838-a107-6146af9d6bba nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.149951475 +0000 UTC m=+154.507610757 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca") pod "controller-manager-879f6c89f-mnm5x" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba") : failed to sync configmap cache: timed out waiting for the condition Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.149994 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert podName:c9443fc5-a284-4838-a107-6146af9d6bba nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.149987956 +0000 UTC m=+154.507647068 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert") pod "controller-manager-879f6c89f-mnm5x" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba") : failed to sync secret cache: timed out waiting for the condition Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.150925 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lngh5\" (UniqueName: \"kubernetes.io/projected/1c79015c-ed6e-4e37-849b-6cd707d832f7-kube-api-access-lngh5\") pod \"multus-admission-controller-857f4d67dd-v9nc5\" (UID: \"1c79015c-ed6e-4e37-849b-6cd707d832f7\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.160206 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.165401 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.172050 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.172613 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.672431344 +0000 UTC m=+154.030090456 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.174178 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.174207 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmvlt\" (UniqueName: \"kubernetes.io/projected/e4cff035-51e5-443d-9bb7-0b6ac6505e20-kube-api-access-rmvlt\") pod \"etcd-operator-b45778765-jjrzz\" (UID: \"e4cff035-51e5-443d-9bb7-0b6ac6505e20\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.187851 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h8kc\" (UniqueName: \"kubernetes.io/projected/2b0da807-d2d7-4e46-af2c-7d1ddecb07ac-kube-api-access-4h8kc\") pod \"catalog-operator-68c6474976-hh4jw\" (UID: \"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.206128 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1298d867-c4e4-48a2-b316-72aab5c1cfa4-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2c9bl\" (UID: \"1298d867-c4e4-48a2-b316-72aab5c1cfa4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.234275 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xp6r\" (UniqueName: \"kubernetes.io/projected/e466426d-ad8f-46ce-813b-b0276253e555-kube-api-access-5xp6r\") pod \"collect-profiles-29401860-j7gf4\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.241556 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-hbpfm" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.270034 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-z5ldf" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.273106 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.273232 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62gkr\" (UniqueName: \"kubernetes.io/projected/a7129135-79a3-478d-9ae4-78f7fe46280f-kube-api-access-62gkr\") pod \"marketplace-operator-79b997595-84zml\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.273512 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.773495955 +0000 UTC m=+154.131155067 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.292230 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.293219 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgb4h\" (UniqueName: \"kubernetes.io/projected/471a4a28-2882-4e5f-a47a-695be9853e3d-kube-api-access-wgb4h\") pod \"machine-config-server-dgl6n\" (UID: \"471a4a28-2882-4e5f-a47a-695be9853e3d\") " pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.311185 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk9hs\" (UniqueName: \"kubernetes.io/projected/defb4b32-105c-4e11-8d80-1b482fd18f4c-kube-api-access-qk9hs\") pod \"router-default-5444994796-tnf9q\" (UID: \"defb4b32-105c-4e11-8d80-1b482fd18f4c\") " pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.312667 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hht46\" (UniqueName: \"kubernetes.io/projected/049f814f-4648-411b-b692-bf19c3066c8a-kube-api-access-hht46\") pod \"machine-approver-56656f9798-fb55z\" (UID: \"049f814f-4648-411b-b692-bf19c3066c8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.333196 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.334145 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pdqb\" (UniqueName: \"kubernetes.io/projected/6fa3c1cc-bb4c-4b8d-a6a2-947b50943567-kube-api-access-4pdqb\") pod \"service-ca-operator-777779d784-xz98p\" (UID: \"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.334629 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" event={"ID":"b81392a7-2366-421d-834c-d72869a34014","Type":"ContainerStarted","Data":"c0fd2dd32a14a68967e08b87128a9e20e469f0f8f29463eab2fd6571fb0f74b4"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.335985 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" event={"ID":"299d494f-f519-4793-b234-6fa6174e9428","Type":"ContainerStarted","Data":"1ebfa287452734236d3d1a2267969f3d4dae8c601f658c980c6053fd008bf161"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.337282 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" 
event={"ID":"4e266b78-e9fa-40bf-844b-7d5e273e988b","Type":"ContainerStarted","Data":"b60c7ea29e95069c84cf32da2b7cf1c9eed8397d9b2c08a4290f3bb798575976"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.337307 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" event={"ID":"4e266b78-e9fa-40bf-844b-7d5e273e988b","Type":"ContainerStarted","Data":"b6ab6a6e5d8a08814ed01b34ba11dd460128d0356877b0c51fd072eafd6ffa46"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.337635 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.341489 5045 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-h6sq9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.341573 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" podUID="4e266b78-e9fa-40bf-844b-7d5e273e988b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.342053 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" event={"ID":"cc7739f6-7c08-44a8-aad4-4ec37a9016f2","Type":"ContainerStarted","Data":"f2691fdcd1f1f5ff3e59d506a72f35c121d633b00cb6d1b5f696cd9e6f2d0a8d"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.342096 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" event={"ID":"cc7739f6-7c08-44a8-aad4-4ec37a9016f2","Type":"ContainerStarted","Data":"15aa18cc47a4e2866c03fe78462c4e3185e64d5f80d728fed3ef3117984eed71"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.345282 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.346704 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" event={"ID":"0ec8737e-20e0-4f51-b134-a60be096e1df","Type":"ContainerStarted","Data":"178a34ff2a0ebf392df3f65ecf90daa38d0f1a5706d3e4b3f006173f45dae9e0"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.349761 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" event={"ID":"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e","Type":"ContainerStarted","Data":"bc11dd7dd78d07d47a6bcc7b10f3f845d1528d260e4671cf199f3027bbc66191"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.350212 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.355128 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.356853 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" event={"ID":"148a8bcd-f16e-4a15-89a2-61a79074bdf4","Type":"ContainerStarted","Data":"f20c3a68bf17374c4c0b2d89382b1787a496c8a0522a35d201f48d8688a58d2e"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.356891 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" event={"ID":"148a8bcd-f16e-4a15-89a2-61a79074bdf4","Type":"ContainerStarted","Data":"0ffb29a62b4dbbb9cc36b6afa3e28b23a17b740283505e3ae8dcfab277f0f9a0"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.358863 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" event={"ID":"601df050-5421-4266-bf7c-60096a066a24","Type":"ContainerStarted","Data":"d110eb3b6748d2b64fb189c19699ccf3e96cb217b016188d407ba9f4d7af4845"} Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.361433 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6x5j\" (UniqueName: \"kubernetes.io/projected/c9443fc5-a284-4838-a107-6146af9d6bba-kube-api-access-v6x5j\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.371699 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.374086 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.374303 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.874271167 +0000 UTC m=+154.231930269 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.374402 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.374543 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.374905 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.874897426 +0000 UTC m=+154.232556538 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.378926 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.386184 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.394498 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.401929 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.416418 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.434079 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.435275 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.459482 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.462280 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.481565 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.481927 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:37.981912369 +0000 UTC m=+154.339571471 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.514215 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.548640 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-dgl6n" Nov 25 23:01:37 crc kubenswrapper[5045]: W1125 23:01:37.571203 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddefb4b32_105c_4e11_8d80_1b482fd18f4c.slice/crio-ce134c72f5953d8e75a425db0a38059c2644b54f19babd0a22d84adebf9463a1 WatchSource:0}: Error finding container ce134c72f5953d8e75a425db0a38059c2644b54f19babd0a22d84adebf9463a1: Status 404 returned error can't find the container with id ce134c72f5953d8e75a425db0a38059c2644b54f19babd0a22d84adebf9463a1 Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.586914 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.587363 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.087344864 +0000 UTC m=+154.445003976 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.689135 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.689603 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.18954665 +0000 UTC m=+154.547205762 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.790938 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.792004 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.291975573 +0000 UTC m=+154.649634685 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.892672 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.892817 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.392796266 +0000 UTC m=+154.750455378 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.893087 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.893457 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.393447356 +0000 UTC m=+154.751106458 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:37 crc kubenswrapper[5045]: I1125 23:01:37.994108 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:37 crc kubenswrapper[5045]: E1125 23:01:37.994523 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.494506856 +0000 UTC m=+154.852165968 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.000855 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4rwm4"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.001591 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.019223 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.024635 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-ws5pv"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.088107 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drbtr" podStartSLOduration=134.088088568 podStartE2EDuration="2m14.088088568s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:38.087005654 +0000 UTC m=+154.444664776" watchObservedRunningTime="2025-11-25 23:01:38.088088568 +0000 UTC m=+154.445747680" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.095771 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.096116 5045 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.596102834 +0000 UTC m=+154.953761946 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.211202 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.211440 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.211471 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.211498 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.211528 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.211602 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.711560746 +0000 UTC m=+155.069219858 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.213437 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.217802 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0079ce5c-97a3-43a2-8b93-df87ee4de76b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x4djv\" (UID: \"0079ce5c-97a3-43a2-8b93-df87ee4de76b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.226111 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.226433 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config\") pod \"controller-manager-879f6c89f-mnm5x\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.234073 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.253756 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.258711 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2btnr"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.285524 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-gmw7c"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.288597 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.299648 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.316953 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.317551 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.817527596 +0000 UTC m=+155.175186708 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.328796 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c90ea55_e258_4fcf_9f33_cbca6398d215.slice/crio-d95b31c8486861fa09ea70a73be62704be9f34dd50c5d0429efe5192f73937d1 WatchSource:0}: Error finding container d95b31c8486861fa09ea70a73be62704be9f34dd50c5d0429efe5192f73937d1: Status 404 returned error can't find the container with id d95b31c8486861fa09ea70a73be62704be9f34dd50c5d0429efe5192f73937d1 Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.335349 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-q6hcb" podStartSLOduration=134.335303141 podStartE2EDuration="2m14.335303141s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:38.326238593 +0000 UTC m=+154.683897695" watchObservedRunningTime="2025-11-25 23:01:38.335303141 +0000 UTC m=+154.692962253" Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.336106 5045 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2be9acdf_dc72_44e0_8674_ea5ba59cbbaa.slice/crio-3f4957bb5cfdb80bb8ab8923d51c7707aecdf1880ce21782a2759daa02b47e29 WatchSource:0}: Error finding container 3f4957bb5cfdb80bb8ab8923d51c7707aecdf1880ce21782a2759daa02b47e29: Status 404 returned error can't find the container with id 3f4957bb5cfdb80bb8ab8923d51c7707aecdf1880ce21782a2759daa02b47e29 Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.419057 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.419374 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.919323029 +0000 UTC m=+155.276982131 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.419585 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.419888 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:38.919874636 +0000 UTC m=+155.277533748 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.433423 5045 generic.go:334] "Generic (PLEG): container finished" podID="299d494f-f519-4793-b234-6fa6174e9428" containerID="94f6dea73bee06fbc4d8b88920b026db15443312e7ac2177800650248024a754" exitCode=0 Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.434956 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" event={"ID":"049f814f-4648-411b-b692-bf19c3066c8a","Type":"ContainerStarted","Data":"8aeb2bffb7217ba8a89dcc9c34daa77c6d12275b72db401debde4049e8c782c9"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.435020 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" event={"ID":"299d494f-f519-4793-b234-6fa6174e9428","Type":"ContainerDied","Data":"94f6dea73bee06fbc4d8b88920b026db15443312e7ac2177800650248024a754"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.448389 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" event={"ID":"f848dbf6-817b-44d7-b410-7ac266166501","Type":"ContainerStarted","Data":"fb3faeffdf6f61d896fff2332668cec2af39d221d9b982eaa19c827b4c0ca3d9"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.456229 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" event={"ID":"b81392a7-2366-421d-834c-d72869a34014","Type":"ContainerStarted","Data":"231d017b656d3d048edf7652c4b16e25aa4f293e511770e65f7ffdbf31285626"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.463509 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" event={"ID":"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1","Type":"ContainerStarted","Data":"ee637289cb940624c435f26308fc40df9d19cf4aa7400e3768b5829e40560d3d"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.498659 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" event={"ID":"601df050-5421-4266-bf7c-60096a066a24","Type":"ContainerStarted","Data":"0fea149c27b0b5f4945f961610f4ac09e506f915e27dab70b60cf78813e1464f"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.498815 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" event={"ID":"601df050-5421-4266-bf7c-60096a066a24","Type":"ContainerStarted","Data":"ce87f0011936c5dd14327b6046851d39ac8c6e45f5efe3d50bcc862dae7f4dee"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.520584 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 
23:01:38.521801 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.021784553 +0000 UTC m=+155.379443665 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.526804 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4rwm4" event={"ID":"859fc901-fe58-44e6-b133-9da8193dd02f","Type":"ContainerStarted","Data":"2a5e792d117d3830a7ed32409fd2931de7d4a40681d307be905a6e23fb0bcde8"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.527758 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-4rwm4" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.530813 5045 generic.go:334] "Generic (PLEG): container finished" podID="0ec8737e-20e0-4f51-b134-a60be096e1df" containerID="81432632b0a7a5aacd4a5534473cfe1b202ae2ff8727803cb14f4ad1f800285f" exitCode=0 Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.530871 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" event={"ID":"0ec8737e-20e0-4f51-b134-a60be096e1df","Type":"ContainerDied","Data":"81432632b0a7a5aacd4a5534473cfe1b202ae2ff8727803cb14f4ad1f800285f"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.532881 5045 patch_prober.go:28] interesting pod/console-operator-58897d9998-4rwm4 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.532932 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4rwm4" podUID="859fc901-fe58-44e6-b133-9da8193dd02f" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.534375 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" event={"ID":"50babc27-1757-4498-abd8-0fd5a1033b80","Type":"ContainerStarted","Data":"9402c0bb101573a4029f4912ea9297bc0dcbdd66c029d510967858107d80f7d6"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.535750 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gmw7c" event={"ID":"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa","Type":"ContainerStarted","Data":"3f4957bb5cfdb80bb8ab8923d51c7707aecdf1880ce21782a2759daa02b47e29"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.540436 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" 
event={"ID":"701c2cdf-5d93-4d9d-a2a9-279ed3f62d8e","Type":"ContainerStarted","Data":"00d01011dd9ad29f5272762a94c6dffc1c55129002aa922a59efdff0ae047378"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.543540 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-dgl6n" event={"ID":"471a4a28-2882-4e5f-a47a-695be9853e3d","Type":"ContainerStarted","Data":"fc114fc102cd663973f5afc218fb9efa0bf70703edfa758bab66b1ab57312f2c"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.544601 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" event={"ID":"3c6e0f76-e954-4f89-a2c8-fccbe4440171","Type":"ContainerStarted","Data":"ba455e239290f95046b9f4d595a1c672a2b7fea24c90ec38829bfb740a62357b"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.549118 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" event={"ID":"5c90ea55-e258-4fcf-9f33-cbca6398d215","Type":"ContainerStarted","Data":"d95b31c8486861fa09ea70a73be62704be9f34dd50c5d0429efe5192f73937d1"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.551298 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-tnf9q" event={"ID":"defb4b32-105c-4e11-8d80-1b482fd18f4c","Type":"ContainerStarted","Data":"ccf35a1df3c4bcc45f9877198bf1f1195bb34ad75607ac6cc92a7fef0edfe191"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.551321 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-tnf9q" event={"ID":"defb4b32-105c-4e11-8d80-1b482fd18f4c","Type":"ContainerStarted","Data":"ce134c72f5953d8e75a425db0a38059c2644b54f19babd0a22d84adebf9463a1"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.565215 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-ws5pv" event={"ID":"e59f0f14-0c21-43c9-baaa-bf860aaa16b3","Type":"ContainerStarted","Data":"a52929a66624d44e6ae61ca951d9c8a8ca95d8afaa97571213517929beb3fa2e"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.565267 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-ws5pv" event={"ID":"e59f0f14-0c21-43c9-baaa-bf860aaa16b3","Type":"ContainerStarted","Data":"8901dc788cbf8f7c8d8f67d9772ff9ff21817381f402ddf38b2dda499a29905b"} Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.565282 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-ws5pv" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.572268 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.573941 5045 patch_prober.go:28] interesting pod/downloads-7954f5f757-ws5pv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.573993 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-ws5pv" podUID="e59f0f14-0c21-43c9-baaa-bf860aaa16b3" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 
10.217.0.17:8080: connect: connection refused" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.623340 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.647994 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.147969344 +0000 UTC m=+155.505628636 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.656268 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-jjrzz"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.705070 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.707576 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-hbpfm"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.716250 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-nlsrs"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.725274 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.725891 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.225872345 +0000 UTC m=+155.583531457 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.726327 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.735055 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.763903 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.777859 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-ml4r4"] Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.786996 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb250dfbe_fa9f_430c_ac18_9f6625fac525.slice/crio-25881f9d4f792792d7edb1e4fb064cf574a306c51b0f3eb8428c676161ad00b1 WatchSource:0}: Error finding container 25881f9d4f792792d7edb1e4fb064cf574a306c51b0f3eb8428c676161ad00b1: Status 404 returned error can't find the container with id 25881f9d4f792792d7edb1e4fb064cf574a306c51b0f3eb8428c676161ad00b1 Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.789068 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v9nc5"] Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.789742 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc768642d_19e9_4c69_af8b_9758b06fd298.slice/crio-6157f0d2e2355a3ea00c2de6f591f828ba9ab0cc498f548a8bd10e6c68834bfe WatchSource:0}: Error finding container 6157f0d2e2355a3ea00c2de6f591f828ba9ab0cc498f548a8bd10e6c68834bfe: Status 404 returned error can't find the container with id 6157f0d2e2355a3ea00c2de6f591f828ba9ab0cc498f548a8bd10e6c68834bfe Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.790489 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-84zml"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.792687 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq"] Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.808709 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c79015c_ed6e_4e37_849b_6cd707d832f7.slice/crio-415297991f160be87e2627d57f95476b6456f55639268f9d3957bb95aa4179f8 WatchSource:0}: Error finding container 415297991f160be87e2627d57f95476b6456f55639268f9d3957bb95aa4179f8: Status 404 returned error can't find the container with id 415297991f160be87e2627d57f95476b6456f55639268f9d3957bb95aa4179f8 Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.827337 5045 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod96f7b7cd_943f_4725_8cdb_9b411455cf64.slice/crio-95491ea9bc3b64aae6296775540b98fc3ea759c026c7607937bf3739b12c286a WatchSource:0}: Error finding container 95491ea9bc3b64aae6296775540b98fc3ea759c026c7607937bf3739b12c286a: Status 404 returned error can't find the container with id 95491ea9bc3b64aae6296775540b98fc3ea759c026c7607937bf3739b12c286a Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.828064 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.829357 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.831279 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.331242827 +0000 UTC m=+155.688901939 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.845402 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2"] Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.883615 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb08d30b9_014a_4dee_b584_9fa0a2e6e4d8.slice/crio-65a1ac935d42e9ff2322a7ea4d92f69294eec7e48a1a968802fa1490c180dc68 WatchSource:0}: Error finding container 65a1ac935d42e9ff2322a7ea4d92f69294eec7e48a1a968802fa1490c180dc68: Status 404 returned error can't find the container with id 65a1ac935d42e9ff2322a7ea4d92f69294eec7e48a1a968802fa1490c180dc68 Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.925696 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.931555 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wxk7s"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.932588 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:38 crc kubenswrapper[5045]: E1125 23:01:38.933494 5045 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.433471364 +0000 UTC m=+155.791130486 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.938237 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" podStartSLOduration=133.9382197 podStartE2EDuration="2m13.9382197s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:38.935712113 +0000 UTC m=+155.293371225" watchObservedRunningTime="2025-11-25 23:01:38.9382197 +0000 UTC m=+155.295878812" Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.941543 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl"] Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.942059 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd77ca91_d866_4735_abdd_5309f8ab6709.slice/crio-e0aed0be13d1d7e63231320c17805c4babb54e983a73d1cd0005b76f5cb92fc1 WatchSource:0}: Error finding container e0aed0be13d1d7e63231320c17805c4babb54e983a73d1cd0005b76f5cb92fc1: Status 404 returned error can't find the container with id e0aed0be13d1d7e63231320c17805c4babb54e983a73d1cd0005b76f5cb92fc1 Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.965955 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-z5ldf"] Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.966958 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ce2064c_bf8f_45e8_a5bc_4b19774a54fd.slice/crio-d29fa23ef201eac0ded9309859d89c0863bc69811086a3ae52ae3db9604afbac WatchSource:0}: Error finding container d29fa23ef201eac0ded9309859d89c0863bc69811086a3ae52ae3db9604afbac: Status 404 returned error can't find the container with id d29fa23ef201eac0ded9309859d89c0863bc69811086a3ae52ae3db9604afbac Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.975328 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4"] Nov 25 23:01:38 crc kubenswrapper[5045]: W1125 23:01:38.990910 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1298d867_c4e4_48a2_b316_72aab5c1cfa4.slice/crio-e8446492bf403c6e762d192742f853eb2f5531644feb8d909ce0235dfbe30d19 WatchSource:0}: Error finding container e8446492bf403c6e762d192742f853eb2f5531644feb8d909ce0235dfbe30d19: Status 404 returned error can't find the container with id e8446492bf403c6e762d192742f853eb2f5531644feb8d909ce0235dfbe30d19 Nov 25 23:01:38 crc 
kubenswrapper[5045]: I1125 23:01:38.992538 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw"] Nov 25 23:01:38 crc kubenswrapper[5045]: I1125 23:01:38.995200 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xz98p"] Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.005071 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv"] Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.026350 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-mnm5x"] Nov 25 23:01:39 crc kubenswrapper[5045]: W1125 23:01:39.031548 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode466426d_ad8f_46ce_813b_b0276253e555.slice/crio-5c3a574133a754591d134991444568911b9dd6f9b17b6cc22d3c912d5c691725 WatchSource:0}: Error finding container 5c3a574133a754591d134991444568911b9dd6f9b17b6cc22d3c912d5c691725: Status 404 returned error can't find the container with id 5c3a574133a754591d134991444568911b9dd6f9b17b6cc22d3c912d5c691725 Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.035184 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.035657 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.535640389 +0000 UTC m=+155.893299501 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.136503 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.137182 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.637165504 +0000 UTC m=+155.994824616 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.206648 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" podStartSLOduration=135.206632345 podStartE2EDuration="2m15.206632345s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:39.204479029 +0000 UTC m=+155.562138141" watchObservedRunningTime="2025-11-25 23:01:39.206632345 +0000 UTC m=+155.564291457" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.238319 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.238682 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.738668068 +0000 UTC m=+156.096327180 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.331211 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-4rwm4" podStartSLOduration=135.331177026 podStartE2EDuration="2m15.331177026s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:39.325372208 +0000 UTC m=+155.683031330" watchObservedRunningTime="2025-11-25 23:01:39.331177026 +0000 UTC m=+155.688836138" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.339776 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.340142 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.84011953 +0000 UTC m=+156.197778642 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.365916 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-tst4h" podStartSLOduration=134.36586795 podStartE2EDuration="2m14.36586795s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:39.36586974 +0000 UTC m=+155.723528872" watchObservedRunningTime="2025-11-25 23:01:39.36586795 +0000 UTC m=+155.723527062" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.414137 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-tnf9q" podStartSLOduration=135.414114571 podStartE2EDuration="2m15.414114571s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:39.413059598 +0000 UTC m=+155.770718710" watchObservedRunningTime="2025-11-25 23:01:39.414114571 +0000 UTC m=+155.771773683" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.440941 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.441329 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:39.941315225 +0000 UTC m=+156.298974337 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.453172 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-ws5pv" podStartSLOduration=135.453154838 podStartE2EDuration="2m15.453154838s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:39.447902367 +0000 UTC m=+155.805561479" watchObservedRunningTime="2025-11-25 23:01:39.453154838 +0000 UTC m=+155.810813950" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.466892 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.476623 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 23:01:39 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld Nov 25 23:01:39 crc kubenswrapper[5045]: [+]process-running ok Nov 25 23:01:39 crc kubenswrapper[5045]: healthz check failed Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.476684 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.490649 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qw6j6" podStartSLOduration=135.490631078 podStartE2EDuration="2m15.490631078s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:39.489297577 +0000 UTC m=+155.846956679" watchObservedRunningTime="2025-11-25 23:01:39.490631078 +0000 UTC m=+155.848290190" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.541490 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.541704 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.041678984 +0000 UTC m=+156.399338096 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.542155 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.542447 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.042438448 +0000 UTC m=+156.400097560 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.567408 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-dgl6n" podStartSLOduration=5.567390313 podStartE2EDuration="5.567390313s" podCreationTimestamp="2025-11-25 23:01:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:39.567256989 +0000 UTC m=+155.924916101" watchObservedRunningTime="2025-11-25 23:01:39.567390313 +0000 UTC m=+155.925049425" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.589806 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" event={"ID":"5c90ea55-e258-4fcf-9f33-cbca6398d215","Type":"ContainerStarted","Data":"a05526a5b271b41693db37d352f70a4c099b2ff6fe1bd8779e77315526173e40"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.594724 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" event={"ID":"a7129135-79a3-478d-9ae4-78f7fe46280f","Type":"ContainerStarted","Data":"017deabd0e828df39fee692cb5a76ecf099b5214a17e4148e69a2bb6b4d1bd85"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.597454 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" event={"ID":"0079ce5c-97a3-43a2-8b93-df87ee4de76b","Type":"ContainerStarted","Data":"a4b385c9e90c253b0689fbf67ae49e7417e5b1f5da576d00364e4917da41cee1"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.607500 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" 
event={"ID":"c46e6576-9c19-4782-9466-baeb95106d1f","Type":"ContainerStarted","Data":"4cc679bbea6cfc0061ffd9cc61b1c1db9c92d4535e9e463d7ce50704d57421d7"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.607546 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" event={"ID":"c46e6576-9c19-4782-9466-baeb95106d1f","Type":"ContainerStarted","Data":"c0d1d11609902c8968f0895cfb115f8248347be6561bb1b95f6f17caa87161d6"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.629641 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" event={"ID":"f848dbf6-817b-44d7-b410-7ac266166501","Type":"ContainerStarted","Data":"64355fe8ea8dfd40e098d2a8cb939e370a89949e1bd02991894e2adf45135430"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.631538 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.634463 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" event={"ID":"bd77ca91-d866-4735-abdd-5309f8ab6709","Type":"ContainerStarted","Data":"e0aed0be13d1d7e63231320c17805c4babb54e983a73d1cd0005b76f5cb92fc1"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.641445 5045 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-2btnr container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" start-of-body= Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.641507 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" podUID="f848dbf6-817b-44d7-b410-7ac266166501" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.645559 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.648408 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.148389158 +0000 UTC m=+156.506048270 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.666711 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fgz2j" event={"ID":"b81392a7-2366-421d-834c-d72869a34014","Type":"ContainerStarted","Data":"afd5d983fb50019b197e56a29a18b32482819da432eb5ded3256eca482214f22"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.721201 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" event={"ID":"299d494f-f519-4793-b234-6fa6174e9428","Type":"ContainerStarted","Data":"e007053e7c45d567726de2f792ccb1a70d1bf1d1450bad8c1577c2bfa6723917"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.722250 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.749022 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.750530 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.250512062 +0000 UTC m=+156.608171364 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.750757 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" event={"ID":"3c6e0f76-e954-4f89-a2c8-fccbe4440171","Type":"ContainerStarted","Data":"b72e15bbdce3bffe6029b5fa065572687b5c08572a5d62a25789e0a7d5f40c17"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.755956 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" event={"ID":"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac","Type":"ContainerStarted","Data":"b8f947e31ad539f00877769df8b09ddd155018c12b3bdb26f7fc3f9ad110a679"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.764701 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" event={"ID":"e4cff035-51e5-443d-9bb7-0b6ac6505e20","Type":"ContainerStarted","Data":"555d445fe60f267492dcb1ee9b00c2a20e34d3fd13da55d42f5981f3daa6e782"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.764786 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" event={"ID":"e4cff035-51e5-443d-9bb7-0b6ac6505e20","Type":"ContainerStarted","Data":"ef553748a0c114422d82e7e52d118bf4d820f3ab484e38dd1cf6eb77f6375d49"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.771495 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" event={"ID":"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8","Type":"ContainerStarted","Data":"3d55b1786899b451e2bdc871f60a1f9c73feda7c9bdfd0fdde0ac72ea52c2963"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.771552 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" event={"ID":"b08d30b9-014a-4dee-b584-9fa0a2e6e4d8","Type":"ContainerStarted","Data":"65a1ac935d42e9ff2322a7ea4d92f69294eec7e48a1a968802fa1490c180dc68"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.797817 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" event={"ID":"1c79015c-ed6e-4e37-849b-6cd707d832f7","Type":"ContainerStarted","Data":"415297991f160be87e2627d57f95476b6456f55639268f9d3957bb95aa4179f8"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.806785 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" event={"ID":"0ec8737e-20e0-4f51-b134-a60be096e1df","Type":"ContainerStarted","Data":"05401fc1868401eeb086ce0352b9285871166c2c118b845371c2c180675f257a"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.815308 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" 
event={"ID":"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd","Type":"ContainerStarted","Data":"d29fa23ef201eac0ded9309859d89c0863bc69811086a3ae52ae3db9604afbac"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.821811 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" event={"ID":"b250dfbe-fa9f-430c-ac18-9f6625fac525","Type":"ContainerStarted","Data":"f6202d874ea1f790c79afe8391862fe75154cf85f448bec9c5284b7feb387fe3"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.821875 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" event={"ID":"b250dfbe-fa9f-430c-ac18-9f6625fac525","Type":"ContainerStarted","Data":"25881f9d4f792792d7edb1e4fb064cf574a306c51b0f3eb8428c676161ad00b1"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.827606 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" event={"ID":"114e3666-4983-493e-96d9-25bc57d7a849","Type":"ContainerStarted","Data":"4cbcfba4ecdbe875fdebaa57079995aadf79ebe4c82ed8ba942df6e9c3f12ef8"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.829359 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" event={"ID":"c768642d-19e9-4c69-af8b-9758b06fd298","Type":"ContainerStarted","Data":"6157f0d2e2355a3ea00c2de6f591f828ba9ab0cc498f548a8bd10e6c68834bfe"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.830963 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4rwm4" event={"ID":"859fc901-fe58-44e6-b133-9da8193dd02f","Type":"ContainerStarted","Data":"6edc218d2cc73b5b8e1a32bc0c971688f94249130df827686cbdd48e124b6b72"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.832535 5045 patch_prober.go:28] interesting pod/console-operator-58897d9998-4rwm4 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.832578 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4rwm4" podUID="859fc901-fe58-44e6-b133-9da8193dd02f" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.839224 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" event={"ID":"1298d867-c4e4-48a2-b316-72aab5c1cfa4","Type":"ContainerStarted","Data":"e8446492bf403c6e762d192742f853eb2f5531644feb8d909ce0235dfbe30d19"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.845333 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" event={"ID":"dad5d45e-7679-4473-890e-d974d55f4b94","Type":"ContainerStarted","Data":"7c400a50e7bdec56e70b25500f03c6e890ec4037007cd06d59d22325f35418b8"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.846735 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-hbpfm" 
event={"ID":"4288fa59-0a2c-4041-a659-34c7956b0685","Type":"ContainerStarted","Data":"622924e33931925adb4cda7d6891046f01caadfcfc6f2b9c0faca01a6e3757e6"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.846759 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-hbpfm" event={"ID":"4288fa59-0a2c-4041-a659-34c7956b0685","Type":"ContainerStarted","Data":"e84801fe511c179582d83e373c6c465b96bb6fab57bcabfc4c80c36c073f3dd2"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.849458 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.850064 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.350041625 +0000 UTC m=+156.707700737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.853754 5045 generic.go:334] "Generic (PLEG): container finished" podID="50babc27-1757-4498-abd8-0fd5a1033b80" containerID="901636db9e51a7eca43278b248fd71ee8eb134c5e75dbee11d900993962939e4" exitCode=0 Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.854142 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" event={"ID":"50babc27-1757-4498-abd8-0fd5a1033b80","Type":"ContainerDied","Data":"901636db9e51a7eca43278b248fd71ee8eb134c5e75dbee11d900993962939e4"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.857586 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" event={"ID":"eeaa9de0-e8a8-4612-a1ec-98a5af5589b1","Type":"ContainerStarted","Data":"3f1835a2dedc3a7fad4fdac3ae1bee7649ab4186cf2652662c6bec07877bfe3c"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.858477 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.860471 5045 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-52v8s container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": dial tcp 10.217.0.34:5443: connect: connection refused" start-of-body= Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.860510 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" podUID="eeaa9de0-e8a8-4612-a1ec-98a5af5589b1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": dial tcp 
10.217.0.34:5443: connect: connection refused" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.867039 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" event={"ID":"e466426d-ad8f-46ce-813b-b0276253e555","Type":"ContainerStarted","Data":"5c3a574133a754591d134991444568911b9dd6f9b17b6cc22d3c912d5c691725"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.880092 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" event={"ID":"c9443fc5-a284-4838-a107-6146af9d6bba","Type":"ContainerStarted","Data":"36bb8fe6c05d7096723062383a9b474849ab1b06e128aa9d8621a00cb434c48f"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.920544 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-z5ldf" event={"ID":"063c151b-ad3f-4ce3-b076-0b6b335634d0","Type":"ContainerStarted","Data":"57acd2250f606f0415a11e571dce60c8f010b7690dca65edb6e89bd5e6fc134b"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.920623 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-z5ldf" event={"ID":"063c151b-ad3f-4ce3-b076-0b6b335634d0","Type":"ContainerStarted","Data":"2fa6f7f3742f89169f7496a81f7ad327f31695ed8ecd9c05f4bfa5e4c21a6407"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.952208 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:39 crc kubenswrapper[5045]: E1125 23:01:39.955000 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.454980235 +0000 UTC m=+156.812639347 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.955545 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" event={"ID":"31904391-5f4c-428d-b67c-26fccc5070df","Type":"ContainerStarted","Data":"6b4442a8fced571f4276ed2c2517bc90c94d72a845a745f9a041ce557fe3781a"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.955639 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" event={"ID":"31904391-5f4c-428d-b67c-26fccc5070df","Type":"ContainerStarted","Data":"c673496b40fa31dbb3be938d3bfeaedf641d10034a98c96b1326c6ff9ec0a9ad"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.955685 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.958979 5045 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-ldz47 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.961264 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gmw7c" event={"ID":"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa","Type":"ContainerStarted","Data":"11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.962157 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" podUID="31904391-5f4c-428d-b67c-26fccc5070df" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.963832 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" event={"ID":"96f7b7cd-943f-4725-8cdb-9b411455cf64","Type":"ContainerStarted","Data":"066b9f2fe3065d1a730a2f98036fa3621241084895e1494a49379da52f170bee"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.963903 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" event={"ID":"96f7b7cd-943f-4725-8cdb-9b411455cf64","Type":"ContainerStarted","Data":"95491ea9bc3b64aae6296775540b98fc3ea759c026c7607937bf3739b12c286a"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.965321 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" event={"ID":"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567","Type":"ContainerStarted","Data":"f057da78e80c61b6e7ab3100cd9efcfb3ae4e2bb8df2965aea1a9f43794a7bb0"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.973599 5045 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" event={"ID":"049f814f-4648-411b-b692-bf19c3066c8a","Type":"ContainerStarted","Data":"2a0b46fc66609956ff82ad06665ca885dfedb6743165028943c41fc25d08ba00"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.973639 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" event={"ID":"049f814f-4648-411b-b692-bf19c3066c8a","Type":"ContainerStarted","Data":"e4f84df138f033d26017ea28f51a0460d3b413dfa4a7f1ad493f1aa3acfbf83f"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.983937 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-dgl6n" event={"ID":"471a4a28-2882-4e5f-a47a-695be9853e3d","Type":"ContainerStarted","Data":"e968725a7456d3100bbebf802bd68cc2d5a46e9d9a0b39ffc9c0b301b5e9ccdb"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.989195 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" event={"ID":"7e777e65-538b-4823-abd9-f6c387f3fba3","Type":"ContainerStarted","Data":"e689be74ca08a701cf0d8f7d4c61c80fa1e0878757f3ab830cd732370df0c3a2"} Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.990096 5045 patch_prober.go:28] interesting pod/downloads-7954f5f757-ws5pv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 25 23:01:39 crc kubenswrapper[5045]: I1125 23:01:39.990143 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-ws5pv" podUID="e59f0f14-0c21-43c9-baaa-bf860aaa16b3" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.055500 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.055701 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.555656014 +0000 UTC m=+156.913315126 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.056228 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.058239 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.558229653 +0000 UTC m=+156.915888765 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.159382 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.164902 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.664856084 +0000 UTC m=+157.022515196 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.262703 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.263790 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.763769329 +0000 UTC m=+157.121428441 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.363899 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.365072 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.86483943 +0000 UTC m=+157.222498542 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.366190 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.366631 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.866613654 +0000 UTC m=+157.224272776 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.403939 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" podStartSLOduration=135.403914249 podStartE2EDuration="2m15.403914249s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.403281429 +0000 UTC m=+156.760940551" watchObservedRunningTime="2025-11-25 23:01:40.403914249 +0000 UTC m=+156.761573361" Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.463788 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 23:01:40 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld Nov 25 23:01:40 crc kubenswrapper[5045]: [+]process-running ok Nov 25 23:01:40 crc kubenswrapper[5045]: healthz check failed Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.463849 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.471143 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.472243 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:40.972221004 +0000 UTC m=+157.329880116 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.490239 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" podStartSLOduration=135.490188256 podStartE2EDuration="2m15.490188256s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.445857825 +0000 UTC m=+156.803516937" watchObservedRunningTime="2025-11-25 23:01:40.490188256 +0000 UTC m=+156.847847368"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.490910 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" podStartSLOduration=136.490904858 podStartE2EDuration="2m16.490904858s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.488606237 +0000 UTC m=+156.846265349" watchObservedRunningTime="2025-11-25 23:01:40.490904858 +0000 UTC m=+156.848563970"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.571106 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-gmw7c" podStartSLOduration=136.571086398 podStartE2EDuration="2m16.571086398s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.571040866 +0000 UTC m=+156.928699978" watchObservedRunningTime="2025-11-25 23:01:40.571086398 +0000 UTC m=+156.928745510"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.573999 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.574363 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.074349708 +0000 UTC m=+157.432008820 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.619098 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-z5ldf" podStartSLOduration=6.61907946 podStartE2EDuration="6.61907946s" podCreationTimestamp="2025-11-25 23:01:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.617967636 +0000 UTC m=+156.975626738" watchObservedRunningTime="2025-11-25 23:01:40.61907946 +0000 UTC m=+156.976738572"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.647346 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-jjrzz" podStartSLOduration=136.647329667 podStartE2EDuration="2m16.647329667s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.646028307 +0000 UTC m=+157.003687419" watchObservedRunningTime="2025-11-25 23:01:40.647329667 +0000 UTC m=+157.004988779"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.677681 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.678168 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.178153412 +0000 UTC m=+157.535812524 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.729155 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s" podStartSLOduration=135.729136787 podStartE2EDuration="2m15.729136787s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.69208447 +0000 UTC m=+157.049743582" watchObservedRunningTime="2025-11-25 23:01:40.729136787 +0000 UTC m=+157.086795899"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.771647 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47" podStartSLOduration=135.7716253 podStartE2EDuration="2m15.7716253s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.730117227 +0000 UTC m=+157.087776339" watchObservedRunningTime="2025-11-25 23:01:40.7716253 +0000 UTC m=+157.129284432"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.772700 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" podStartSLOduration=136.772694623 podStartE2EDuration="2m16.772694623s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.770552897 +0000 UTC m=+157.128212009" watchObservedRunningTime="2025-11-25 23:01:40.772694623 +0000 UTC m=+157.130353735"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.779483 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.779968 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.279944436 +0000 UTC m=+157.637603538 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.806228 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fb55z" podStartSLOduration=136.806209991 podStartE2EDuration="2m16.806209991s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.80583541 +0000 UTC m=+157.163494532" watchObservedRunningTime="2025-11-25 23:01:40.806209991 +0000 UTC m=+157.163869103"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.849134 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-85vcd" podStartSLOduration=136.849117848 podStartE2EDuration="2m16.849117848s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.845787586 +0000 UTC m=+157.203446698" watchObservedRunningTime="2025-11-25 23:01:40.849117848 +0000 UTC m=+157.206776960"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.880333 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.880513 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.38048595 +0000 UTC m=+157.738145062 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.880546 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.880879 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.380872142 +0000 UTC m=+157.738531244 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.900022 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-69wcg" podStartSLOduration=135.899995999 podStartE2EDuration="2m15.899995999s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:40.897125501 +0000 UTC m=+157.254784613" watchObservedRunningTime="2025-11-25 23:01:40.899995999 +0000 UTC m=+157.257655121"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.982011 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.982190 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.48215596 +0000 UTC m=+157.839815072 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.982352 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:40 crc kubenswrapper[5045]: E1125 23:01:40.982697 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.482685986 +0000 UTC m=+157.840345098 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.995119 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" event={"ID":"c768642d-19e9-4c69-af8b-9758b06fd298","Type":"ContainerStarted","Data":"71e83de2d6b742b7d40a6601220c80e01fa80029350b79b411fb6d46eecc2138"}
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.995166 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" event={"ID":"c768642d-19e9-4c69-af8b-9758b06fd298","Type":"ContainerStarted","Data":"0eadd933baae7b88a993ae36ee5087c4ee48350bdf7dc4f43fe8131bf68d37ec"}
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.996441 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" event={"ID":"a7129135-79a3-478d-9ae4-78f7fe46280f","Type":"ContainerStarted","Data":"5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf"}
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.996614 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-84zml"
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.998168 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" event={"ID":"c46e6576-9c19-4782-9466-baeb95106d1f","Type":"ContainerStarted","Data":"35fdfa3b23ffdc4e0c233e5389d629f455e769d0e0d9d0e6bd1f53537aa6d55d"}
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.998406 5045 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-84zml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body=
Nov 25 23:01:40 crc kubenswrapper[5045]: I1125 23:01:40.998497 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" podUID="a7129135-79a3-478d-9ae4-78f7fe46280f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.000594 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" event={"ID":"96f7b7cd-943f-4725-8cdb-9b411455cf64","Type":"ContainerStarted","Data":"152ce89a488c561ccd550f7b26333b63c3ad196608176be0d0c9fae8d817678a"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.002381 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" event={"ID":"114e3666-4983-493e-96d9-25bc57d7a849","Type":"ContainerStarted","Data":"478caf26d58311c8ce84e0cd358b4516cd624b4859e704b40a9104c91a1a4f18"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.002413 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" event={"ID":"114e3666-4983-493e-96d9-25bc57d7a849","Type":"ContainerStarted","Data":"c424b90501d1a7bbc6f391eee3e6ef59030b2f74ef453ca8ebefc8318ad9f237"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.004743 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" event={"ID":"1c79015c-ed6e-4e37-849b-6cd707d832f7","Type":"ContainerStarted","Data":"f387b1e0824d980e86e3c21b1fe116ebb556f8185aa64e9ebf86eb8e2e8acac2"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.004799 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" event={"ID":"1c79015c-ed6e-4e37-849b-6cd707d832f7","Type":"ContainerStarted","Data":"c498354eaf8ee4a311916f9109e0885506babda2e60af7f27ba0e59344dd5a15"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.007582 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" event={"ID":"50babc27-1757-4498-abd8-0fd5a1033b80","Type":"ContainerStarted","Data":"034f93175478aa11aede3754cfa9d4ef1dc7fe95da24cda95b1ddb35fc6b71d8"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.009257 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" event={"ID":"6fa3c1cc-bb4c-4b8d-a6a2-947b50943567","Type":"ContainerStarted","Data":"0ac4c15293f80ce9688af0705e52c276a087b1334d070c9b5b87e088ab90d0bc"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.010730 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" event={"ID":"bd77ca91-d866-4735-abdd-5309f8ab6709","Type":"ContainerStarted","Data":"b349f23ce6e2ad262c24fe730f8339a686a54303821ae21a1c2947e923aba616"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.012109 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" event={"ID":"2b0da807-d2d7-4e46-af2c-7d1ddecb07ac","Type":"ContainerStarted","Data":"ade7604546f4dae4bf1b632a4b82d6ab10e72a443859139e8804ea2deabefec0"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.012693 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.013576 5045 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-hh4jw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body=
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.013619 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" podUID="2b0da807-d2d7-4e46-af2c-7d1ddecb07ac" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.014077 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" event={"ID":"1298d867-c4e4-48a2-b316-72aab5c1cfa4","Type":"ContainerStarted","Data":"da55cd8b84d6819bb0c4ff8a3b13ff196b3c19e90684cf3f850e53db888ed055"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.015472 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-ml4r4" event={"ID":"dad5d45e-7679-4473-890e-d974d55f4b94","Type":"ContainerStarted","Data":"4149d2e17454f5453ad25ea39d5daed05ac6efac4563f38bcaf5257c8ee15720"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.018996 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-hbpfm" event={"ID":"4288fa59-0a2c-4041-a659-34c7956b0685","Type":"ContainerStarted","Data":"9cf50d707c69696ef5681bdf168d88dfbad9e0872d1be7878712f20a99f3b5e0"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.019760 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-hbpfm"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.021341 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-955fw" event={"ID":"7e777e65-538b-4823-abd9-f6c387f3fba3","Type":"ContainerStarted","Data":"568fb8a8e399599f4768002fa3321f697e21eb694c6281e5cb53220cb111fc18"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.022994 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" event={"ID":"c9443fc5-a284-4838-a107-6146af9d6bba","Type":"ContainerStarted","Data":"5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.023756 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.025234 5045 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-mnm5x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.025301 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" podUID="c9443fc5-a284-4838-a107-6146af9d6bba" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.028857 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" event={"ID":"5c90ea55-e258-4fcf-9f33-cbca6398d215","Type":"ContainerStarted","Data":"698a47c71248f6251481ec5616b34c0cc752881c4b54d1a9ed6169ae3f1b875e"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.029328 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-nlsrs" podStartSLOduration=137.029297716 podStartE2EDuration="2m17.029297716s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.027579883 +0000 UTC m=+157.385238995" watchObservedRunningTime="2025-11-25 23:01:41.029297716 +0000 UTC m=+157.386956828"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.037028 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" event={"ID":"0ec8737e-20e0-4f51-b134-a60be096e1df","Type":"ContainerStarted","Data":"94dd877e3c204bcf8916200606d76e505d64c237616b5658232a04be0a14cc3a"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.040142 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" event={"ID":"0079ce5c-97a3-43a2-8b93-df87ee4de76b","Type":"ContainerStarted","Data":"353c296faf185da28fc209c4c6f37a8d63dd35c448cfeef0c7e3a11653d13d73"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.043483 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" event={"ID":"b250dfbe-fa9f-430c-ac18-9f6625fac525","Type":"ContainerStarted","Data":"6db409d0671ad17dd4980dac28864ee5353ce6d3a1a0ad96130ea1e2c221a5f8"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.043683 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.045348 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" event={"ID":"e466426d-ad8f-46ce-813b-b0276253e555","Type":"ContainerStarted","Data":"07f8e9dad9f2efcf3691f68fdf377d08d3cd124f7b2240bd0d98fd897007936e"}
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.047444 5045 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-zzgj8 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.047498 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8" podUID="299d494f-f519-4793-b234-6fa6174e9428" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.052396 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-v9nc5" podStartSLOduration=136.052380964 podStartE2EDuration="2m16.052380964s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.051628311 +0000 UTC m=+157.409287433" watchObservedRunningTime="2025-11-25 23:01:41.052380964 +0000 UTC m=+157.410040076"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.056770 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-52v8s"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.081452 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2c9bl" podStartSLOduration=136.081433215 podStartE2EDuration="2m16.081433215s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.078551447 +0000 UTC m=+157.436210569" watchObservedRunningTime="2025-11-25 23:01:41.081433215 +0000 UTC m=+157.439092327"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.083170 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.083255 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.583238921 +0000 UTC m=+157.940898023 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.083659 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.088463 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.588449701 +0000 UTC m=+157.946108813 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.089793 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ldz47"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.169804 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" podStartSLOduration=136.169782096 podStartE2EDuration="2m16.169782096s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.149161233 +0000 UTC m=+157.506820335" watchObservedRunningTime="2025-11-25 23:01:41.169782096 +0000 UTC m=+157.527441208"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.190982 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.191520 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.691500923 +0000 UTC m=+158.049160035 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.197027 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-4rwm4"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.208457 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" podStartSLOduration=136.208431462 podStartE2EDuration="2m16.208431462s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.173917603 +0000 UTC m=+157.531576715" watchObservedRunningTime="2025-11-25 23:01:41.208431462 +0000 UTC m=+157.566090574"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.232401 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-j7nnq" podStartSLOduration=136.232382747 podStartE2EDuration="2m16.232382747s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.210243148 +0000 UTC m=+157.567902260" watchObservedRunningTime="2025-11-25 23:01:41.232382747 +0000 UTC m=+157.590041859"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.266460 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xz98p" podStartSLOduration=136.266428011 podStartE2EDuration="2m16.266428011s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.265988648 +0000 UTC m=+157.623647760" watchObservedRunningTime="2025-11-25 23:01:41.266428011 +0000 UTC m=+157.624087123"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.266662 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7q4w2" podStartSLOduration=137.266658518 podStartE2EDuration="2m17.266658518s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.245171329 +0000 UTC m=+157.602830441" watchObservedRunningTime="2025-11-25 23:01:41.266658518 +0000 UTC m=+157.624317630"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.292457 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.292835 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.792822891 +0000 UTC m=+158.150482003 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.302001 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rpltr" podStartSLOduration=136.301985362 podStartE2EDuration="2m16.301985362s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.301692673 +0000 UTC m=+157.659351785" watchObservedRunningTime="2025-11-25 23:01:41.301985362 +0000 UTC m=+157.659644474"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.369411 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" podStartSLOduration=137.36939243 podStartE2EDuration="2m17.36939243s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.339039419 +0000 UTC m=+157.696698521" watchObservedRunningTime="2025-11-25 23:01:41.36939243 +0000 UTC m=+157.727051542"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.393040 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.393327 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:41.893314664 +0000 UTC m=+158.250973776 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.393857 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-hbpfm" podStartSLOduration=7.393848781 podStartE2EDuration="7.393848781s" podCreationTimestamp="2025-11-25 23:01:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.392390486 +0000 UTC m=+157.750049598" watchObservedRunningTime="2025-11-25 23:01:41.393848781 +0000 UTC m=+157.751507893"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.396361 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" podStartSLOduration=136.396354418 podStartE2EDuration="2m16.396354418s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.37035129 +0000 UTC m=+157.728010392" watchObservedRunningTime="2025-11-25 23:01:41.396354418 +0000 UTC m=+157.754013530"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.450615 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nzmnq" podStartSLOduration=137.450601462 podStartE2EDuration="2m17.450601462s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.443531085 +0000 UTC m=+157.801190197" watchObservedRunningTime="2025-11-25 23:01:41.450601462 +0000 UTC m=+157.808260574"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.462846 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:41 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:41 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:41 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.462885 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.503242 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.504077 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.004064322 +0000 UTC m=+158.361723434 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.604650 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.605179 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.105163544 +0000 UTC m=+158.462822656 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.649928 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x" podStartSLOduration=136.649910607 podStartE2EDuration="2m16.649910607s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.568440427 +0000 UTC m=+157.926099539" watchObservedRunningTime="2025-11-25 23:01:41.649910607 +0000 UTC m=+158.007569719"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.706188 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.706468 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.206458282 +0000 UTC m=+158.564117384 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.725187 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" podStartSLOduration=137.725169016 podStartE2EDuration="2m17.725169016s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.697650572 +0000 UTC m=+158.055309684" watchObservedRunningTime="2025-11-25 23:01:41.725169016 +0000 UTC m=+158.082828118"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.787038 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" podStartSLOduration=101.787022814 podStartE2EDuration="1m41.787022814s" podCreationTimestamp="2025-11-25 23:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.758523859 +0000 UTC m=+158.116182971" watchObservedRunningTime="2025-11-25 23:01:41.787022814 +0000 UTC m=+158.144681926"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.787507 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-dlljd"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.787808 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-dlljd"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.791817 5045 patch_prober.go:28] interesting pod/apiserver-76f77b778f-dlljd container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.791872 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" podUID="0ec8737e-20e0-4f51-b134-a60be096e1df" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.806958 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.807123 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.307100209 +0000 UTC m=+158.664759321 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.807183 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.807477 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.30747026 +0000 UTC m=+158.665129372 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.875990 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4djv" podStartSLOduration=137.875971482 podStartE2EDuration="2m17.875971482s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.823514212 +0000 UTC m=+158.181173324" watchObservedRunningTime="2025-11-25 23:01:41.875971482 +0000 UTC m=+158.233630594"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.908595 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:41 crc kubenswrapper[5045]: E1125 23:01:41.908849 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.40883512 +0000 UTC m=+158.766494232 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.936245 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.936436 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.939254 5045 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-whfmb container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.14:8443/livez\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.939300 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" podUID="50babc27-1757-4498-abd8-0fd5a1033b80" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.14:8443/livez\": dial tcp 10.217.0.14:8443: connect: connection refused"
Nov 25 23:01:41 crc kubenswrapper[5045]: I1125 23:01:41.979640 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr"
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.009755 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.010028 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.510016415 +0000 UTC m=+158.867675527 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.033742 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t75fl" podStartSLOduration=137.033708892 podStartE2EDuration="2m17.033708892s" podCreationTimestamp="2025-11-25 22:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:41.878648624 +0000 UTC m=+158.236307736" watchObservedRunningTime="2025-11-25 23:01:42.033708892 +0000 UTC m=+158.391368004"
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.053474 5045 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-hh4jw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body=
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.053833 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" podUID="2b0da807-d2d7-4e46-af2c-7d1ddecb07ac" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused"
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.053456 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" event={"ID":"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd","Type":"ContainerStarted","Data":"e893c2ede143d2d01e3d3f6820699568865fb69cb87ebb04b6947382c741e27f"}
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.054643 5045 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-mnm5x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.054665 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" podUID="c9443fc5-a284-4838-a107-6146af9d6bba" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.054828 5045 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-84zml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body=
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.054917 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" podUID="a7129135-79a3-478d-9ae4-78f7fe46280f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused"
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.110567 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.110889 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.610874179 +0000 UTC m=+158.968533291 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.158932 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zzgj8"
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.213110 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.221695 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.721673328 +0000 UTC m=+159.079332440 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.314938 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.315163 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.815146057 +0000 UTC m=+159.172805169 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.415752 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.416468 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:42.916437345 +0000 UTC m=+159.274096777 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.467615 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:42 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:42 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:42 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.467740 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.516584 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.516769 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.016741092 +0000 UTC m=+159.374400204 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.517045 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.517781 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.017749723 +0000 UTC m=+159.375408835 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.581197 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.581809 5045 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.586304 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.588128 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.597568 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.618028 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.618525 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0980cdef-2742-484e-bdb0-9bc92de678b9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"0980cdef-2742-484e-bdb0-9bc92de678b9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.618601 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0980cdef-2742-484e-bdb0-9bc92de678b9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"0980cdef-2742-484e-bdb0-9bc92de678b9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.618916 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.118864935 +0000 UTC m=+159.476524067 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.720025 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0980cdef-2742-484e-bdb0-9bc92de678b9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"0980cdef-2742-484e-bdb0-9bc92de678b9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.720070 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.720111 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0980cdef-2742-484e-bdb0-9bc92de678b9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"0980cdef-2742-484e-bdb0-9bc92de678b9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.720198 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0980cdef-2742-484e-bdb0-9bc92de678b9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"0980cdef-2742-484e-bdb0-9bc92de678b9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.720641 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.220630918 +0000 UTC m=+159.578290030 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.746625 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0980cdef-2742-484e-bdb0-9bc92de678b9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"0980cdef-2742-484e-bdb0-9bc92de678b9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.820823 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.821081 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.321065029 +0000 UTC m=+159.678724141 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.902163 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:42 crc kubenswrapper[5045]: I1125 23:01:42.922107 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:42 crc kubenswrapper[5045]: E1125 23:01:42.922373 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.422362367 +0000 UTC m=+159.780021479 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.022436 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.022613 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.522586702 +0000 UTC m=+159.880245814 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.022820 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.023196 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.523189421 +0000 UTC m=+159.880848533 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.067282 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" event={"ID":"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd","Type":"ContainerStarted","Data":"9a431ba8313901afe9b8355ee0d35d6c0a36cff62097522b43663c5d4d80a6d8"} Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.087633 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hh4jw" Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.102115 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.128294 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.129653 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.629636007 +0000 UTC m=+159.987295119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.232354 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.232733 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.732698409 +0000 UTC m=+160.090357521 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.334407 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.334853 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.834830652 +0000 UTC m=+160.192489764 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.440404 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.441030 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:43.941018741 +0000 UTC m=+160.298677853 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.441297 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.458146 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.467861 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 23:01:43 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld Nov 25 23:01:43 crc kubenswrapper[5045]: [+]process-running ok Nov 25 23:01:43 crc kubenswrapper[5045]: healthz check failed Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.467914 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 23:01:43 crc kubenswrapper[5045]: W1125 23:01:43.494095 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod0980cdef_2742_484e_bdb0_9bc92de678b9.slice/crio-4fa02b65a05f01d10f20158b21f9860e4f6027c609680c312c611c0e62853258 WatchSource:0}: Error finding container 4fa02b65a05f01d10f20158b21f9860e4f6027c609680c312c611c0e62853258: Status 404 returned error can't find the container with id 4fa02b65a05f01d10f20158b21f9860e4f6027c609680c312c611c0e62853258 Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.544763 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.544961 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.044932709 +0000 UTC m=+160.402591821 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.545280 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.545558 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.045551628 +0000 UTC m=+160.403210740 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.646156 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.646309 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.146284678 +0000 UTC m=+160.503943780 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.646696 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.647027 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.147016231 +0000 UTC m=+160.504675343 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.747738 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.748072 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.248055221 +0000 UTC m=+160.605714333 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.848634 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.848972 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.348958047 +0000 UTC m=+160.706617159 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.949986 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.950146 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.45012105 +0000 UTC m=+160.807780162 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:43 crc kubenswrapper[5045]: I1125 23:01:43.950291 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:43 crc kubenswrapper[5045]: E1125 23:01:43.950573 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.450560674 +0000 UTC m=+160.808219786 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.038414 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wzn6m"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.039321 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.045290 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.051893 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.052035 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.552014837 +0000 UTC m=+160.909673949 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.052198 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.052482 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.552472351 +0000 UTC m=+160.910131463 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.061574 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wzn6m"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.092001 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"0980cdef-2742-484e-bdb0-9bc92de678b9","Type":"ContainerStarted","Data":"4fa02b65a05f01d10f20158b21f9860e4f6027c609680c312c611c0e62853258"} Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.098635 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" event={"ID":"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd","Type":"ContainerStarted","Data":"cc15c8faf40b73a7c07775267eef128c76790a571b8f155e9ec27494054fb8ed"} Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.137303 5045 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.153142 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.153347 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfv4q\" (UniqueName: \"kubernetes.io/projected/00bdfdd4-092b-4071-87c1-fb9386f7114e-kube-api-access-pfv4q\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " 
pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.153475 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-utilities\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.153511 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-catalog-content\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.154367 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.654350936 +0000 UTC m=+161.012010038 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.229895 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-frnbt"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.230900 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.233191 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.245474 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-frnbt"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.254237 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfv4q\" (UniqueName: \"kubernetes.io/projected/00bdfdd4-092b-4071-87c1-fb9386f7114e-kube-api-access-pfv4q\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.254279 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-utilities\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.254327 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-utilities\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.254346 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-catalog-content\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.254368 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-catalog-content\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.254387 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm7dv\" (UniqueName: \"kubernetes.io/projected/eb680718-f140-4525-950a-980e0dc1ed87-kube-api-access-xm7dv\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.254406 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.254675 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.754663944 +0000 UTC m=+161.112323056 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.255835 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-utilities\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.256071 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-catalog-content\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.301494 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfv4q\" (UniqueName: \"kubernetes.io/projected/00bdfdd4-092b-4071-87c1-fb9386f7114e-kube-api-access-pfv4q\") pod \"community-operators-wzn6m\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.350836 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.355259 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.355454 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-catalog-content\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.355478 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm7dv\" (UniqueName: \"kubernetes.io/projected/eb680718-f140-4525-950a-980e0dc1ed87-kube-api-access-xm7dv\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.355532 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-utilities\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.355850 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.855826798 +0000 UTC m=+161.213485910 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.355986 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-utilities\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.356119 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-catalog-content\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.386518 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm7dv\" (UniqueName: \"kubernetes.io/projected/eb680718-f140-4525-950a-980e0dc1ed87-kube-api-access-xm7dv\") pod \"certified-operators-frnbt\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.455556 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wtczn"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.456603 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.456701 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.457059 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:44.957041963 +0000 UTC m=+161.314701075 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.467391 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 23:01:44 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld Nov 25 23:01:44 crc kubenswrapper[5045]: [+]process-running ok Nov 25 23:01:44 crc kubenswrapper[5045]: healthz check failed Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.467464 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.477908 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wtczn"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.541552 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.557356 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.557517 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:45.057492635 +0000 UTC m=+161.415151737 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.557598 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-catalog-content\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.557649 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.557696 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-utilities\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.557734 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf7ns\" (UniqueName: \"kubernetes.io/projected/ac4a3911-99ba-425c-ae87-a4bc36dfb406-kube-api-access-pf7ns\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.557968 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 23:01:45.057960479 +0000 UTC m=+161.415619591 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-9cmr2" (UID: "0e953287-8cf8-4561-8a48-731746910551") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.589048 5045 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T23:01:44.137341464Z","Handler":null,"Name":""} Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.642857 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9srj4"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.649652 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.660271 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.660541 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf7ns\" (UniqueName: \"kubernetes.io/projected/ac4a3911-99ba-425c-ae87-a4bc36dfb406-kube-api-access-pf7ns\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.660606 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-catalog-content\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.660661 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-utilities\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: E1125 23:01:44.661391 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 23:01:45.161373092 +0000 UTC m=+161.519032204 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.661434 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-utilities\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.661700 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-catalog-content\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.674890 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9srj4"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.680096 5045 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.680150 5045 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.709335 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf7ns\" (UniqueName: \"kubernetes.io/projected/ac4a3911-99ba-425c-ae87-a4bc36dfb406-kube-api-access-pf7ns\") pod \"community-operators-wtczn\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.762141 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-catalog-content\") pod \"certified-operators-9srj4\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.762199 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.762219 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-utilities\") pod \"certified-operators-9srj4\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 
25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.762268 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkkmn\" (UniqueName: \"kubernetes.io/projected/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-kube-api-access-kkkmn\") pod \"certified-operators-9srj4\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.769565 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.770506 5045 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.770545 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.853483 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wzn6m"] Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.854482 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-9cmr2\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.865468 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.865970 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkkmn\" (UniqueName: \"kubernetes.io/projected/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-kube-api-access-kkkmn\") pod \"certified-operators-9srj4\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.866560 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-catalog-content\") pod \"certified-operators-9srj4\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.866602 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-utilities\") pod \"certified-operators-9srj4\" (UID: 
\"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.867011 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-utilities\") pod \"certified-operators-9srj4\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.867548 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-catalog-content\") pod \"certified-operators-9srj4\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.892885 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.900743 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkkmn\" (UniqueName: \"kubernetes.io/projected/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-kube-api-access-kkkmn\") pod \"certified-operators-9srj4\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:44 crc kubenswrapper[5045]: I1125 23:01:44.980901 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.068700 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.083901 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-frnbt"] Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.134001 5045 generic.go:334] "Generic (PLEG): container finished" podID="0980cdef-2742-484e-bdb0-9bc92de678b9" containerID="7fac6b99190def332ab40e1a19a5a05beab8e2a971367787982b78b8e162b388" exitCode=0 Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.134062 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"0980cdef-2742-484e-bdb0-9bc92de678b9","Type":"ContainerDied","Data":"7fac6b99190def332ab40e1a19a5a05beab8e2a971367787982b78b8e162b388"} Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.157043 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzn6m" event={"ID":"00bdfdd4-092b-4071-87c1-fb9386f7114e","Type":"ContainerStarted","Data":"b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531"} Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.157117 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzn6m" event={"ID":"00bdfdd4-092b-4071-87c1-fb9386f7114e","Type":"ContainerStarted","Data":"e3da41be3de5e434fef5de3319e89b618c0057cef73c38c392664b37cc914732"} Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.188066 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" event={"ID":"7ce2064c-bf8f-45e8-a5bc-4b19774a54fd","Type":"ContainerStarted","Data":"1ace552051cd07e1d7636eb3138114eb326f27a62c117b09bb968ac58a3844ef"} Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.248197 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-wxk7s" podStartSLOduration=11.248173396 podStartE2EDuration="11.248173396s" podCreationTimestamp="2025-11-25 23:01:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:45.248003821 +0000 UTC m=+161.605662933" watchObservedRunningTime="2025-11-25 23:01:45.248173396 +0000 UTC m=+161.605832508" Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.277143 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wtczn"] Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.451094 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9srj4"] Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.470203 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 23:01:45 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld Nov 25 23:01:45 crc kubenswrapper[5045]: [+]process-running ok Nov 25 23:01:45 crc kubenswrapper[5045]: healthz check failed Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.470256 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with 
statuscode: 500" Nov 25 23:01:45 crc kubenswrapper[5045]: I1125 23:01:45.519899 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-9cmr2"] Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.027250 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h2rl5"] Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.028176 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.030309 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.037831 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h2rl5"] Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.089518 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw4v6\" (UniqueName: \"kubernetes.io/projected/571187eb-51e5-40d8-83b3-2295535de7e6-kube-api-access-xw4v6\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.089613 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-utilities\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.089634 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-catalog-content\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.190961 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw4v6\" (UniqueName: \"kubernetes.io/projected/571187eb-51e5-40d8-83b3-2295535de7e6-kube-api-access-xw4v6\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.191285 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-utilities\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.191314 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-catalog-content\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.191840 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-utilities\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.191921 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-catalog-content\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.195952 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" event={"ID":"0e953287-8cf8-4561-8a48-731746910551","Type":"ContainerStarted","Data":"16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.196002 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" event={"ID":"0e953287-8cf8-4561-8a48-731746910551","Type":"ContainerStarted","Data":"b15ff89dde89d6d5f718eb54d668496efd14e9a0b1a24691a8230650660614b4"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.197101 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.199038 5045 generic.go:334] "Generic (PLEG): container finished" podID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerID="eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49" exitCode=0 Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.199094 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9srj4" event={"ID":"d88d2d59-1cfc-48e3-99c0-fb45dca3f290","Type":"ContainerDied","Data":"eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.199115 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9srj4" event={"ID":"d88d2d59-1cfc-48e3-99c0-fb45dca3f290","Type":"ContainerStarted","Data":"466145bd76b1b5a2cc8b5b85e4407759289435ba601ac11bf95304e051e5cba2"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.200866 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.202116 5045 generic.go:334] "Generic (PLEG): container finished" podID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerID="5bbd4eaf1ef42f40b027385cd063b932f664d7abb6c6e19b8a5f9165e7e74077" exitCode=0 Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.202197 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtczn" event={"ID":"ac4a3911-99ba-425c-ae87-a4bc36dfb406","Type":"ContainerDied","Data":"5bbd4eaf1ef42f40b027385cd063b932f664d7abb6c6e19b8a5f9165e7e74077"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.202231 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtczn" event={"ID":"ac4a3911-99ba-425c-ae87-a4bc36dfb406","Type":"ContainerStarted","Data":"91d30879c720e4f02d48b325907af8a6fc3fa722959eae1e51908f79f39f3020"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.206413 5045 generic.go:334] "Generic (PLEG): container finished" 
podID="eb680718-f140-4525-950a-980e0dc1ed87" containerID="3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa" exitCode=0 Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.206487 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-frnbt" event={"ID":"eb680718-f140-4525-950a-980e0dc1ed87","Type":"ContainerDied","Data":"3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.206512 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-frnbt" event={"ID":"eb680718-f140-4525-950a-980e0dc1ed87","Type":"ContainerStarted","Data":"b7a7c10bad72e3079ea5e6aa6a2fb635bbb35dd8b4a4584c4ae6ac12ec1c3d67"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.209384 5045 generic.go:334] "Generic (PLEG): container finished" podID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerID="b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531" exitCode=0 Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.209489 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzn6m" event={"ID":"00bdfdd4-092b-4071-87c1-fb9386f7114e","Type":"ContainerDied","Data":"b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531"} Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.221284 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" podStartSLOduration=142.22126514 podStartE2EDuration="2m22.22126514s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:46.218700362 +0000 UTC m=+162.576359484" watchObservedRunningTime="2025-11-25 23:01:46.22126514 +0000 UTC m=+162.578924252" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.232960 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw4v6\" (UniqueName: \"kubernetes.io/projected/571187eb-51e5-40d8-83b3-2295535de7e6-kube-api-access-xw4v6\") pod \"redhat-marketplace-h2rl5\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.342089 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.414253 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.439217 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-r4w4p"] Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.440258 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.451557 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4w4p"] Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.466861 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 23:01:46 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld Nov 25 23:01:46 crc kubenswrapper[5045]: [+]process-running ok Nov 25 23:01:46 crc kubenswrapper[5045]: healthz check failed Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.466914 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.497770 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rlmd\" (UniqueName: \"kubernetes.io/projected/28263e0a-9ac5-487e-9c95-8c2932ddcf74-kube-api-access-6rlmd\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.498271 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-catalog-content\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.498311 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-utilities\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.516215 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.598975 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h2rl5"] Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.599178 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0980cdef-2742-484e-bdb0-9bc92de678b9-kubelet-dir\") pod \"0980cdef-2742-484e-bdb0-9bc92de678b9\" (UID: \"0980cdef-2742-484e-bdb0-9bc92de678b9\") " Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.599326 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0980cdef-2742-484e-bdb0-9bc92de678b9-kube-api-access\") pod \"0980cdef-2742-484e-bdb0-9bc92de678b9\" (UID: \"0980cdef-2742-484e-bdb0-9bc92de678b9\") " Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.599478 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-catalog-content\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.599519 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-utilities\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.599558 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rlmd\" (UniqueName: \"kubernetes.io/projected/28263e0a-9ac5-487e-9c95-8c2932ddcf74-kube-api-access-6rlmd\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.600674 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0980cdef-2742-484e-bdb0-9bc92de678b9-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0980cdef-2742-484e-bdb0-9bc92de678b9" (UID: "0980cdef-2742-484e-bdb0-9bc92de678b9"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.601369 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-utilities\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.603741 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-catalog-content\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.604831 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0980cdef-2742-484e-bdb0-9bc92de678b9-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0980cdef-2742-484e-bdb0-9bc92de678b9" (UID: "0980cdef-2742-484e-bdb0-9bc92de678b9"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:01:46 crc kubenswrapper[5045]: W1125 23:01:46.609249 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod571187eb_51e5_40d8_83b3_2295535de7e6.slice/crio-12edf3cf9ee94af7ea6c8ef1083c601ff9fbc763637110cb319f3142495a3e98 WatchSource:0}: Error finding container 12edf3cf9ee94af7ea6c8ef1083c601ff9fbc763637110cb319f3142495a3e98: Status 404 returned error can't find the container with id 12edf3cf9ee94af7ea6c8ef1083c601ff9fbc763637110cb319f3142495a3e98 Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.624258 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rlmd\" (UniqueName: \"kubernetes.io/projected/28263e0a-9ac5-487e-9c95-8c2932ddcf74-kube-api-access-6rlmd\") pod \"redhat-marketplace-r4w4p\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") " pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.700811 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0980cdef-2742-484e-bdb0-9bc92de678b9-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.700842 5045 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0980cdef-2742-484e-bdb0-9bc92de678b9-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.766798 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.792387 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.799049 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-dlljd" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.951840 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.960933 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.960984 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.968279 5045 patch_prober.go:28] interesting pod/console-f9d7485db-gmw7c container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.968325 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-gmw7c" podUID="2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" containerName="console" probeResult="failure" output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.969616 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-whfmb" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.977466 5045 patch_prober.go:28] interesting pod/downloads-7954f5f757-ws5pv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.977489 5045 patch_prober.go:28] interesting pod/downloads-7954f5f757-ws5pv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.977520 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-ws5pv" podUID="e59f0f14-0c21-43c9-baaa-bf860aaa16b3" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 25 23:01:46 crc kubenswrapper[5045]: I1125 23:01:46.977533 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-ws5pv" podUID="e59f0f14-0c21-43c9-baaa-bf860aaa16b3" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.125036 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4w4p"] Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.260220 5045 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.261144 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"0980cdef-2742-484e-bdb0-9bc92de678b9","Type":"ContainerDied","Data":"4fa02b65a05f01d10f20158b21f9860e4f6027c609680c312c611c0e62853258"} Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.261190 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fa02b65a05f01d10f20158b21f9860e4f6027c609680c312c611c0e62853258" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.263014 5045 generic.go:334] "Generic (PLEG): container finished" podID="571187eb-51e5-40d8-83b3-2295535de7e6" containerID="ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c" exitCode=0 Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.263059 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h2rl5" event={"ID":"571187eb-51e5-40d8-83b3-2295535de7e6","Type":"ContainerDied","Data":"ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c"} Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.263077 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h2rl5" event={"ID":"571187eb-51e5-40d8-83b3-2295535de7e6","Type":"ContainerStarted","Data":"12edf3cf9ee94af7ea6c8ef1083c601ff9fbc763637110cb319f3142495a3e98"} Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.265210 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4w4p" event={"ID":"28263e0a-9ac5-487e-9c95-8c2932ddcf74","Type":"ContainerStarted","Data":"7998ffe80f2dc3c0ea98b4f9bbcd7f66a31ccb0cbc704f7063c0ccd2feca3938"} Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.269597 5045 generic.go:334] "Generic (PLEG): container finished" podID="e466426d-ad8f-46ce-813b-b0276253e555" containerID="07f8e9dad9f2efcf3691f68fdf377d08d3cd124f7b2240bd0d98fd897007936e" exitCode=0 Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.269690 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" event={"ID":"e466426d-ad8f-46ce-813b-b0276253e555","Type":"ContainerDied","Data":"07f8e9dad9f2efcf3691f68fdf377d08d3cd124f7b2240bd0d98fd897007936e"} Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.330006 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.340677 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e044b50-b07a-44a0-b69f-45fd4392de24-metrics-certs\") pod \"network-metrics-daemon-9rjvw\" (UID: \"9e044b50-b07a-44a0-b69f-45fd4392de24\") " pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.354261 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.447671 5045 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cntjg"] Nov 25 23:01:47 crc kubenswrapper[5045]: E1125 23:01:47.447911 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0980cdef-2742-484e-bdb0-9bc92de678b9" containerName="pruner" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.447923 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0980cdef-2742-484e-bdb0-9bc92de678b9" containerName="pruner" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.448018 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0980cdef-2742-484e-bdb0-9bc92de678b9" containerName="pruner" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.448686 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.454960 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cntjg"] Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.460960 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-tnf9q" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.461649 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.470134 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 23:01:47 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld Nov 25 23:01:47 crc kubenswrapper[5045]: [+]process-running ok Nov 25 23:01:47 crc kubenswrapper[5045]: healthz check failed Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.470202 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.473191 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9rjvw" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.541619 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmmlb\" (UniqueName: \"kubernetes.io/projected/cc573acb-eee1-4849-967f-fd1b253b640f-kube-api-access-gmmlb\") pod \"redhat-operators-cntjg\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.542020 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-utilities\") pod \"redhat-operators-cntjg\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.542122 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-catalog-content\") pod \"redhat-operators-cntjg\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.643786 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-utilities\") pod \"redhat-operators-cntjg\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.643861 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-catalog-content\") pod \"redhat-operators-cntjg\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.643918 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmmlb\" (UniqueName: \"kubernetes.io/projected/cc573acb-eee1-4849-967f-fd1b253b640f-kube-api-access-gmmlb\") pod \"redhat-operators-cntjg\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.644453 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-utilities\") pod \"redhat-operators-cntjg\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.644470 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-catalog-content\") pod \"redhat-operators-cntjg\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.664854 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmmlb\" (UniqueName: \"kubernetes.io/projected/cc573acb-eee1-4849-967f-fd1b253b640f-kube-api-access-gmmlb\") pod \"redhat-operators-cntjg\" (UID: 
\"cc573acb-eee1-4849-967f-fd1b253b640f\") " pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.796102 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.829095 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v82tk"] Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.830337 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.847146 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v82tk"] Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.948393 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-catalog-content\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.948446 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cld2\" (UniqueName: \"kubernetes.io/projected/ca230894-91a9-4e27-8fbf-a23be4cdade4-kube-api-access-6cld2\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:47 crc kubenswrapper[5045]: I1125 23:01:47.948530 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-utilities\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.049502 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-catalog-content\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.049556 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cld2\" (UniqueName: \"kubernetes.io/projected/ca230894-91a9-4e27-8fbf-a23be4cdade4-kube-api-access-6cld2\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.049612 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-utilities\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.050399 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-utilities\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " 
pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.050447 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-catalog-content\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.075774 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cld2\" (UniqueName: \"kubernetes.io/projected/ca230894-91a9-4e27-8fbf-a23be4cdade4-kube-api-access-6cld2\") pod \"redhat-operators-v82tk\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") " pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.078448 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-9rjvw"] Nov 25 23:01:48 crc kubenswrapper[5045]: W1125 23:01:48.116783 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e044b50_b07a_44a0_b69f_45fd4392de24.slice/crio-bdd4ecdad407a278ea9652e18ee2051bc70ca91b86e4201a20daa804b8e2aff6 WatchSource:0}: Error finding container bdd4ecdad407a278ea9652e18ee2051bc70ca91b86e4201a20daa804b8e2aff6: Status 404 returned error can't find the container with id bdd4ecdad407a278ea9652e18ee2051bc70ca91b86e4201a20daa804b8e2aff6 Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.152258 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.250796 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.251770 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.254549 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.257360 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.260162 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.277467 5045 generic.go:334] "Generic (PLEG): container finished" podID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerID="6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe" exitCode=0
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.277575 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4w4p" event={"ID":"28263e0a-9ac5-487e-9c95-8c2932ddcf74","Type":"ContainerDied","Data":"6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe"}
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.279556 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" event={"ID":"9e044b50-b07a-44a0-b69f-45fd4392de24","Type":"ContainerStarted","Data":"bdd4ecdad407a278ea9652e18ee2051bc70ca91b86e4201a20daa804b8e2aff6"}
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.345101 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cntjg"]
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.353909 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/feda008e-86cb-42c8-a374-86af8810daa2-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"feda008e-86cb-42c8-a374-86af8810daa2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.354073 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/feda008e-86cb-42c8-a374-86af8810daa2-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"feda008e-86cb-42c8-a374-86af8810daa2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.455017 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/feda008e-86cb-42c8-a374-86af8810daa2-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"feda008e-86cb-42c8-a374-86af8810daa2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.455400 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/feda008e-86cb-42c8-a374-86af8810daa2-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"feda008e-86cb-42c8-a374-86af8810daa2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.455552 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/feda008e-86cb-42c8-a374-86af8810daa2-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"feda008e-86cb-42c8-a374-86af8810daa2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.463955 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:48 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:48 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:48 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.463997 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.481870 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/feda008e-86cb-42c8-a374-86af8810daa2-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"feda008e-86cb-42c8-a374-86af8810daa2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.571287 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.614272 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4"
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.657795 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e466426d-ad8f-46ce-813b-b0276253e555-secret-volume\") pod \"e466426d-ad8f-46ce-813b-b0276253e555\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") "
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.658104 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xp6r\" (UniqueName: \"kubernetes.io/projected/e466426d-ad8f-46ce-813b-b0276253e555-kube-api-access-5xp6r\") pod \"e466426d-ad8f-46ce-813b-b0276253e555\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") "
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.658133 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e466426d-ad8f-46ce-813b-b0276253e555-config-volume\") pod \"e466426d-ad8f-46ce-813b-b0276253e555\" (UID: \"e466426d-ad8f-46ce-813b-b0276253e555\") "
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.659289 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e466426d-ad8f-46ce-813b-b0276253e555-config-volume" (OuterVolumeSpecName: "config-volume") pod "e466426d-ad8f-46ce-813b-b0276253e555" (UID: "e466426d-ad8f-46ce-813b-b0276253e555"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.662395 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e466426d-ad8f-46ce-813b-b0276253e555-kube-api-access-5xp6r" (OuterVolumeSpecName: "kube-api-access-5xp6r") pod "e466426d-ad8f-46ce-813b-b0276253e555" (UID: "e466426d-ad8f-46ce-813b-b0276253e555"). InnerVolumeSpecName "kube-api-access-5xp6r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.662848 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e466426d-ad8f-46ce-813b-b0276253e555-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e466426d-ad8f-46ce-813b-b0276253e555" (UID: "e466426d-ad8f-46ce-813b-b0276253e555"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.701656 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v82tk"]
Nov 25 23:01:48 crc kubenswrapper[5045]: W1125 23:01:48.747310 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca230894_91a9_4e27_8fbf_a23be4cdade4.slice/crio-066d7db43790ee9b649d1cd1efbf21545b7ac03ac71e07f31c79319b09298204 WatchSource:0}: Error finding container 066d7db43790ee9b649d1cd1efbf21545b7ac03ac71e07f31c79319b09298204: Status 404 returned error can't find the container with id 066d7db43790ee9b649d1cd1efbf21545b7ac03ac71e07f31c79319b09298204
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.758762 5045 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e466426d-ad8f-46ce-813b-b0276253e555-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.758784 5045 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e466426d-ad8f-46ce-813b-b0276253e555-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 23:01:48 crc kubenswrapper[5045]: I1125 23:01:48.758793 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xp6r\" (UniqueName: \"kubernetes.io/projected/e466426d-ad8f-46ce-813b-b0276253e555-kube-api-access-5xp6r\") on node \"crc\" DevicePath \"\""
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.146905 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.246791 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-hbpfm"
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.305231 5045 generic.go:334] "Generic (PLEG): container finished" podID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerID="6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe" exitCode=0
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.305313 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v82tk" event={"ID":"ca230894-91a9-4e27-8fbf-a23be4cdade4","Type":"ContainerDied","Data":"6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe"}
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.305457 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v82tk" event={"ID":"ca230894-91a9-4e27-8fbf-a23be4cdade4","Type":"ContainerStarted","Data":"066d7db43790ee9b649d1cd1efbf21545b7ac03ac71e07f31c79319b09298204"}
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.311819 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4" event={"ID":"e466426d-ad8f-46ce-813b-b0276253e555","Type":"ContainerDied","Data":"5c3a574133a754591d134991444568911b9dd6f9b17b6cc22d3c912d5c691725"}
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.311856 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c3a574133a754591d134991444568911b9dd6f9b17b6cc22d3c912d5c691725"
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.311906 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4"
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.350986 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" event={"ID":"9e044b50-b07a-44a0-b69f-45fd4392de24","Type":"ContainerStarted","Data":"2946afc2adbdd320af8099869af3079b637f4eb1064b7574642c10d9143c61f3"}
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.351021 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9rjvw" event={"ID":"9e044b50-b07a-44a0-b69f-45fd4392de24","Type":"ContainerStarted","Data":"22f6389d92cd908504a1bb2b22a9b1ab5c6b84435302a194bc810aab33d07ffa"}
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.355637 5045 generic.go:334] "Generic (PLEG): container finished" podID="cc573acb-eee1-4849-967f-fd1b253b640f" containerID="762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9" exitCode=0
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.355735 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cntjg" event={"ID":"cc573acb-eee1-4849-967f-fd1b253b640f","Type":"ContainerDied","Data":"762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9"}
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.355773 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cntjg" event={"ID":"cc573acb-eee1-4849-967f-fd1b253b640f","Type":"ContainerStarted","Data":"567ba330f8db8d29c8acde5d829c747e099f99bd9d9393ef57e4b40b489bea07"}
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.357390 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"feda008e-86cb-42c8-a374-86af8810daa2","Type":"ContainerStarted","Data":"a4fba22817361cd0498632ea770a71b0d8c7335c9b851c7c8735d7d0026d79a3"}
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.372315 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-9rjvw" podStartSLOduration=145.372297506 podStartE2EDuration="2m25.372297506s" podCreationTimestamp="2025-11-25 22:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:49.364026752 +0000 UTC m=+165.721685864" watchObservedRunningTime="2025-11-25 23:01:49.372297506 +0000 UTC m=+165.729956618"
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.465023 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:49 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:49 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:49 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:49 crc kubenswrapper[5045]: I1125 23:01:49.465342 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:50 crc kubenswrapper[5045]: I1125 23:01:50.372796 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"feda008e-86cb-42c8-a374-86af8810daa2","Type":"ContainerStarted","Data":"c350e685adcd6c66a2605894aae4c4ae53a0d8039c551bdc02ac68bfdb5368fd"}
Nov 25 23:01:50 crc kubenswrapper[5045]: I1125 23:01:50.390407 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.390387972 podStartE2EDuration="2.390387972s" podCreationTimestamp="2025-11-25 23:01:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:01:50.386649528 +0000 UTC m=+166.744308640" watchObservedRunningTime="2025-11-25 23:01:50.390387972 +0000 UTC m=+166.748047104"
Nov 25 23:01:50 crc kubenswrapper[5045]: I1125 23:01:50.464103 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:50 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:50 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:50 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:50 crc kubenswrapper[5045]: I1125 23:01:50.464203 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:51 crc kubenswrapper[5045]: I1125 23:01:51.385949 5045 generic.go:334] "Generic (PLEG): container finished" podID="feda008e-86cb-42c8-a374-86af8810daa2" containerID="c350e685adcd6c66a2605894aae4c4ae53a0d8039c551bdc02ac68bfdb5368fd" exitCode=0
Nov 25 23:01:51 crc kubenswrapper[5045]: I1125 23:01:51.385990 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"feda008e-86cb-42c8-a374-86af8810daa2","Type":"ContainerDied","Data":"c350e685adcd6c66a2605894aae4c4ae53a0d8039c551bdc02ac68bfdb5368fd"}
Nov 25 23:01:51 crc kubenswrapper[5045]: I1125 23:01:51.463524 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:51 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:51 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:51 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:51 crc kubenswrapper[5045]: I1125 23:01:51.463590 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:52 crc kubenswrapper[5045]: I1125 23:01:52.463649 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:52 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:52 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:52 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:52 crc kubenswrapper[5045]: I1125 23:01:52.463708 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:53 crc kubenswrapper[5045]: I1125 23:01:53.463706 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:53 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:53 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:53 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:53 crc kubenswrapper[5045]: I1125 23:01:53.464680 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:54 crc kubenswrapper[5045]: I1125 23:01:54.463492 5045 patch_prober.go:28] interesting pod/router-default-5444994796-tnf9q container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 23:01:54 crc kubenswrapper[5045]: [-]has-synced failed: reason withheld
Nov 25 23:01:54 crc kubenswrapper[5045]: [+]process-running ok
Nov 25 23:01:54 crc kubenswrapper[5045]: healthz check failed
Nov 25 23:01:54 crc kubenswrapper[5045]: I1125 23:01:54.463571 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tnf9q" podUID="defb4b32-105c-4e11-8d80-1b482fd18f4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 23:01:55 crc kubenswrapper[5045]: I1125 23:01:55.462883 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-tnf9q"
Nov 25 23:01:55 crc kubenswrapper[5045]: I1125 23:01:55.465527 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-tnf9q"
Nov 25 23:01:56 crc kubenswrapper[5045]: I1125 23:01:56.965015 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-gmw7c"
Nov 25 23:01:56 crc kubenswrapper[5045]: I1125 23:01:56.968774 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-gmw7c"
Nov 25 23:01:56 crc kubenswrapper[5045]: I1125 23:01:56.983935 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-ws5pv"
Nov 25 23:02:00 crc kubenswrapper[5045]: I1125 23:02:00.540796 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 23:02:00 crc kubenswrapper[5045]: I1125 23:02:00.541946 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 23:02:02 crc kubenswrapper[5045]: I1125 23:02:02.496838 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 23:02:05 crc kubenswrapper[5045]: I1125 23:02:05.074952 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.076988 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.186789 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/feda008e-86cb-42c8-a374-86af8810daa2-kube-api-access\") pod \"feda008e-86cb-42c8-a374-86af8810daa2\" (UID: \"feda008e-86cb-42c8-a374-86af8810daa2\") "
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.186870 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/feda008e-86cb-42c8-a374-86af8810daa2-kubelet-dir\") pod \"feda008e-86cb-42c8-a374-86af8810daa2\" (UID: \"feda008e-86cb-42c8-a374-86af8810daa2\") "
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.187296 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/feda008e-86cb-42c8-a374-86af8810daa2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "feda008e-86cb-42c8-a374-86af8810daa2" (UID: "feda008e-86cb-42c8-a374-86af8810daa2"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.194531 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/feda008e-86cb-42c8-a374-86af8810daa2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "feda008e-86cb-42c8-a374-86af8810daa2" (UID: "feda008e-86cb-42c8-a374-86af8810daa2"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.288935 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/feda008e-86cb-42c8-a374-86af8810daa2-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.288970 5045 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/feda008e-86cb-42c8-a374-86af8810daa2-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.515501 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"feda008e-86cb-42c8-a374-86af8810daa2","Type":"ContainerDied","Data":"a4fba22817361cd0498632ea770a71b0d8c7335c9b851c7c8735d7d0026d79a3"}
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.515557 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4fba22817361cd0498632ea770a71b0d8c7335c9b851c7c8735d7d0026d79a3"
Nov 25 23:02:09 crc kubenswrapper[5045]: I1125 23:02:09.515630 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 23:02:17 crc kubenswrapper[5045]: I1125 23:02:17.165468 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fs58x"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.320963 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.321158 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6rlmd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-r4w4p_openshift-marketplace(28263e0a-9ac5-487e-9c95-8c2932ddcf74): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.321791 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.321930 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kkkmn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-9srj4_openshift-marketplace(d88d2d59-1cfc-48e3-99c0-fb45dca3f290): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.322939 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-r4w4p" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.323001 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-9srj4" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.329593 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.329820 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xm7dv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-frnbt_openshift-marketplace(eb680718-f140-4525-950a-980e0dc1ed87): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 23:02:18 crc kubenswrapper[5045]: E1125 23:02:18.331000 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-frnbt" podUID="eb680718-f140-4525-950a-980e0dc1ed87"
Nov 25 23:02:25 crc kubenswrapper[5045]: E1125 23:02:25.813612 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-r4w4p" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74"
Nov 25 23:02:25 crc kubenswrapper[5045]: E1125 23:02:25.813624 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-9srj4" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290"
Nov 25 23:02:25 crc kubenswrapper[5045]: E1125 23:02:25.813624 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-frnbt" podUID="eb680718-f140-4525-950a-980e0dc1ed87"
Nov 25 23:02:28 crc kubenswrapper[5045]: E1125 23:02:28.284470 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 25 23:02:28 crc kubenswrapper[5045]: E1125 23:02:28.284972 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6cld2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-v82tk_openshift-marketplace(ca230894-91a9-4e27-8fbf-a23be4cdade4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 23:02:28 crc kubenswrapper[5045]: E1125 23:02:28.286761 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-v82tk" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.105909 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.106332 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="feda008e-86cb-42c8-a374-86af8810daa2" containerName="pruner"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.106361 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="feda008e-86cb-42c8-a374-86af8810daa2" containerName="pruner"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.106399 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e466426d-ad8f-46ce-813b-b0276253e555" containerName="collect-profiles"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.106420 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e466426d-ad8f-46ce-813b-b0276253e555" containerName="collect-profiles"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.106633 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="feda008e-86cb-42c8-a374-86af8810daa2" containerName="pruner"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.106660 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="e466426d-ad8f-46ce-813b-b0276253e555" containerName="collect-profiles"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.107362 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.111460 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.112495 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.126578 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.190699 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.190768 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.292393 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.292489 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.292743 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.326920 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.439035 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.540350 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-v82tk" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.540977 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.541041 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.541099 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.542007 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 23:02:30 crc kubenswrapper[5045]: I1125 23:02:30.542186 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c" gracePeriod=600
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.584958 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.585132 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gmmlb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-cntjg_openshift-marketplace(cc573acb-eee1-4849-967f-fd1b253b640f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.586488 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-cntjg" podUID="cc573acb-eee1-4849-967f-fd1b253b640f"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.652856 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-cntjg" podUID="cc573acb-eee1-4849-967f-fd1b253b640f"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.766256 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.766765 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pfv4q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-wzn6m_openshift-marketplace(00bdfdd4-092b-4071-87c1-fb9386f7114e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.768196 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-wzn6m" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.873679 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.874189 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pf7ns,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-wtczn_openshift-marketplace(ac4a3911-99ba-425c-ae87-a4bc36dfb406): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.875417 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-wtczn" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.974835 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.975045 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xw4v6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-h2rl5_openshift-marketplace(571187eb-51e5-40d8-83b3-2295535de7e6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 23:02:30 crc kubenswrapper[5045]: E1125 23:02:30.976221 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-h2rl5" podUID="571187eb-51e5-40d8-83b3-2295535de7e6"
Nov 25 23:02:31 crc kubenswrapper[5045]: I1125 23:02:31.082835 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 23:02:31 crc kubenswrapper[5045]: W1125 23:02:31.090649 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod63c0a41a_5000_45e6_8d35_1bcb273bf0ab.slice/crio-801079bd9d9a9ebde51c0b13395cc383ea6ba854ab48dfb6be8ddfcffffd068e WatchSource:0}: Error finding container 801079bd9d9a9ebde51c0b13395cc383ea6ba854ab48dfb6be8ddfcffffd068e: Status 404 returned error can't find the container with id 801079bd9d9a9ebde51c0b13395cc383ea6ba854ab48dfb6be8ddfcffffd068e
Nov 25 23:02:31 crc kubenswrapper[5045]: I1125 23:02:31.658075 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"63c0a41a-5000-45e6-8d35-1bcb273bf0ab","Type":"ContainerStarted","Data":"f3cdf66fdf9e622242a05642e15d2ce89123cf01413f0e0f748ac69feb3fd7e9"}
Nov 25 23:02:31 crc kubenswrapper[5045]: I1125 23:02:31.658535 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"63c0a41a-5000-45e6-8d35-1bcb273bf0ab","Type":"ContainerStarted","Data":"801079bd9d9a9ebde51c0b13395cc383ea6ba854ab48dfb6be8ddfcffffd068e"}
Nov 25 23:02:31 crc kubenswrapper[5045]: I1125 23:02:31.663794 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c" exitCode=0
Nov 25 23:02:31 crc kubenswrapper[5045]: I1125 23:02:31.664521 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c"}
Nov 25 23:02:31 crc kubenswrapper[5045]: I1125 23:02:31.664539 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"43b4ed36c05a672d0150875c65cb7d95bf353277109bd9005a037fea7220d422"}
Nov 25 23:02:31 crc kubenswrapper[5045]: E1125 23:02:31.665760 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-h2rl5" podUID="571187eb-51e5-40d8-83b3-2295535de7e6"
Nov 25 23:02:31 crc kubenswrapper[5045]: E1125 23:02:31.666001 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-wtczn" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406"
Nov 25 23:02:31 crc kubenswrapper[5045]: E1125 23:02:31.666969 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-wzn6m" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e"
Nov 25 23:02:31 crc kubenswrapper[5045]: I1125 23:02:31.675990 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=1.6759502149999999 podStartE2EDuration="1.675950215s" podCreationTimestamp="2025-11-25 23:02:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:02:31.674476939 +0000 UTC m=+208.032136051" watchObservedRunningTime="2025-11-25 23:02:31.675950215 +0000 UTC m=+208.033609357"
Nov 25 23:02:32 crc kubenswrapper[5045]: I1125 23:02:32.670162 5045 generic.go:334] "Generic (PLEG): container finished" podID="63c0a41a-5000-45e6-8d35-1bcb273bf0ab" containerID="f3cdf66fdf9e622242a05642e15d2ce89123cf01413f0e0f748ac69feb3fd7e9" exitCode=0
Nov 25 23:02:32 crc kubenswrapper[5045]: I1125 23:02:32.670216 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"63c0a41a-5000-45e6-8d35-1bcb273bf0ab","Type":"ContainerDied","Data":"f3cdf66fdf9e622242a05642e15d2ce89123cf01413f0e0f748ac69feb3fd7e9"}
Nov 25 23:02:33 crc kubenswrapper[5045]: I1125 23:02:33.984623 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.083245 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kubelet-dir\") pod \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\" (UID: \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\") "
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.083310 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kube-api-access\") pod \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\" (UID: \"63c0a41a-5000-45e6-8d35-1bcb273bf0ab\") "
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.084577 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "63c0a41a-5000-45e6-8d35-1bcb273bf0ab" (UID: "63c0a41a-5000-45e6-8d35-1bcb273bf0ab"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.104066 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "63c0a41a-5000-45e6-8d35-1bcb273bf0ab" (UID: "63c0a41a-5000-45e6-8d35-1bcb273bf0ab"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.184620 5045 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.184682 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63c0a41a-5000-45e6-8d35-1bcb273bf0ab-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.687679 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"63c0a41a-5000-45e6-8d35-1bcb273bf0ab","Type":"ContainerDied","Data":"801079bd9d9a9ebde51c0b13395cc383ea6ba854ab48dfb6be8ddfcffffd068e"}
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.688641 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="801079bd9d9a9ebde51c0b13395cc383ea6ba854ab48dfb6be8ddfcffffd068e"
Nov 25 23:02:34 crc kubenswrapper[5045]: I1125 23:02:34.687848 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.299572 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 23:02:36 crc kubenswrapper[5045]: E1125 23:02:36.300613 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63c0a41a-5000-45e6-8d35-1bcb273bf0ab" containerName="pruner"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.300633 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="63c0a41a-5000-45e6-8d35-1bcb273bf0ab" containerName="pruner"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.300862 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="63c0a41a-5000-45e6-8d35-1bcb273bf0ab" containerName="pruner"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.301487 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.304941 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.305040 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.327856 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.419752 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-var-lock\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.419906 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.419943 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/90af59c5-19b9-410b-9913-c678750e67c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.520932 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.520998 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/90af59c5-19b9-410b-9913-c678750e67c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.521065 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-var-lock\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.522133 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.522428 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-var-lock\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.554664 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/90af59c5-19b9-410b-9913-c678750e67c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:36 crc kubenswrapper[5045]: I1125 23:02:36.624415 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 23:02:37 crc kubenswrapper[5045]: I1125 23:02:37.078762 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 23:02:37 crc kubenswrapper[5045]: I1125 23:02:37.707289 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"90af59c5-19b9-410b-9913-c678750e67c0","Type":"ContainerStarted","Data":"a8f7f3b7bb6a57a95937db6730726761e2464a72752de80a7545e932e09093c0"}
Nov 25 23:02:37 crc kubenswrapper[5045]: I1125 23:02:37.707612 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"90af59c5-19b9-410b-9913-c678750e67c0","Type":"ContainerStarted","Data":"00aad0f3d96f1f5c06f05ef99b38801c6e2de8613da6f63392a529995f8deb83"}
Nov 25 23:02:38 crc kubenswrapper[5045]: I1125 23:02:38.713685 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-frnbt" event={"ID":"eb680718-f140-4525-950a-980e0dc1ed87","Type":"ContainerStarted","Data":"b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564"}
Nov 25 23:02:38 crc kubenswrapper[5045]: I1125 23:02:38.715823 5045 generic.go:334] "Generic (PLEG): container finished" podID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerID="3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f" exitCode=0
Nov 25 23:02:38 crc kubenswrapper[5045]: I1125 23:02:38.716244 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4w4p" event={"ID":"28263e0a-9ac5-487e-9c95-8c2932ddcf74","Type":"ContainerDied","Data":"3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f"}
Nov 25 23:02:38 crc kubenswrapper[5045]: I1125 23:02:38.785475 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.785455959 podStartE2EDuration="2.785455959s" podCreationTimestamp="2025-11-25 23:02:36 +0000 UTC"
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:02:38.782669332 +0000 UTC m=+215.140328434" watchObservedRunningTime="2025-11-25 23:02:38.785455959 +0000 UTC m=+215.143115071" Nov 25 23:02:39 crc kubenswrapper[5045]: I1125 23:02:39.722745 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4w4p" event={"ID":"28263e0a-9ac5-487e-9c95-8c2932ddcf74","Type":"ContainerStarted","Data":"c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb"} Nov 25 23:02:39 crc kubenswrapper[5045]: I1125 23:02:39.725428 5045 generic.go:334] "Generic (PLEG): container finished" podID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerID="ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65" exitCode=0 Nov 25 23:02:39 crc kubenswrapper[5045]: I1125 23:02:39.725507 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9srj4" event={"ID":"d88d2d59-1cfc-48e3-99c0-fb45dca3f290","Type":"ContainerDied","Data":"ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65"} Nov 25 23:02:39 crc kubenswrapper[5045]: I1125 23:02:39.727300 5045 generic.go:334] "Generic (PLEG): container finished" podID="eb680718-f140-4525-950a-980e0dc1ed87" containerID="b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564" exitCode=0 Nov 25 23:02:39 crc kubenswrapper[5045]: I1125 23:02:39.727348 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-frnbt" event={"ID":"eb680718-f140-4525-950a-980e0dc1ed87","Type":"ContainerDied","Data":"b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564"} Nov 25 23:02:39 crc kubenswrapper[5045]: I1125 23:02:39.752870 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-r4w4p" podStartSLOduration=2.862220946 podStartE2EDuration="53.752847364s" podCreationTimestamp="2025-11-25 23:01:46 +0000 UTC" firstStartedPulling="2025-11-25 23:01:48.279155228 +0000 UTC m=+164.636814340" lastFinishedPulling="2025-11-25 23:02:39.169781646 +0000 UTC m=+215.527440758" observedRunningTime="2025-11-25 23:02:39.747760305 +0000 UTC m=+216.105419447" watchObservedRunningTime="2025-11-25 23:02:39.752847364 +0000 UTC m=+216.110506506" Nov 25 23:02:40 crc kubenswrapper[5045]: I1125 23:02:40.736101 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9srj4" event={"ID":"d88d2d59-1cfc-48e3-99c0-fb45dca3f290","Type":"ContainerStarted","Data":"fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f"} Nov 25 23:02:40 crc kubenswrapper[5045]: I1125 23:02:40.738894 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-frnbt" event={"ID":"eb680718-f140-4525-950a-980e0dc1ed87","Type":"ContainerStarted","Data":"93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1"} Nov 25 23:02:40 crc kubenswrapper[5045]: I1125 23:02:40.757563 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9srj4" podStartSLOduration=2.5687870090000002 podStartE2EDuration="56.757545208s" podCreationTimestamp="2025-11-25 23:01:44 +0000 UTC" firstStartedPulling="2025-11-25 23:01:46.200569235 +0000 UTC m=+162.558228357" lastFinishedPulling="2025-11-25 23:02:40.389327444 +0000 UTC m=+216.746986556" observedRunningTime="2025-11-25 23:02:40.754043709 
+0000 UTC m=+217.111702841" watchObservedRunningTime="2025-11-25 23:02:40.757545208 +0000 UTC m=+217.115204320" Nov 25 23:02:44 crc kubenswrapper[5045]: I1125 23:02:44.413281 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-frnbt" podStartSLOduration=6.257924893 podStartE2EDuration="1m0.41326646s" podCreationTimestamp="2025-11-25 23:01:44 +0000 UTC" firstStartedPulling="2025-11-25 23:01:46.208107797 +0000 UTC m=+162.565766909" lastFinishedPulling="2025-11-25 23:02:40.363449354 +0000 UTC m=+216.721108476" observedRunningTime="2025-11-25 23:02:40.772074123 +0000 UTC m=+217.129733235" watchObservedRunningTime="2025-11-25 23:02:44.41326646 +0000 UTC m=+220.770925572" Nov 25 23:02:44 crc kubenswrapper[5045]: I1125 23:02:44.542642 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:02:44 crc kubenswrapper[5045]: I1125 23:02:44.542787 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:02:44 crc kubenswrapper[5045]: I1125 23:02:44.771702 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:02:44 crc kubenswrapper[5045]: I1125 23:02:44.982691 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:02:44 crc kubenswrapper[5045]: I1125 23:02:44.982778 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:02:45 crc kubenswrapper[5045]: I1125 23:02:45.033464 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:02:45 crc kubenswrapper[5045]: I1125 23:02:45.767164 5045 generic.go:334] "Generic (PLEG): container finished" podID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerID="9b7f42cb336590a08a3e5eeed49381a796f888c0fe11336756181a215932f9b4" exitCode=0 Nov 25 23:02:45 crc kubenswrapper[5045]: I1125 23:02:45.767261 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtczn" event={"ID":"ac4a3911-99ba-425c-ae87-a4bc36dfb406","Type":"ContainerDied","Data":"9b7f42cb336590a08a3e5eeed49381a796f888c0fe11336756181a215932f9b4"} Nov 25 23:02:45 crc kubenswrapper[5045]: I1125 23:02:45.769164 5045 generic.go:334] "Generic (PLEG): container finished" podID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerID="104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757" exitCode=0 Nov 25 23:02:45 crc kubenswrapper[5045]: I1125 23:02:45.769887 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzn6m" event={"ID":"00bdfdd4-092b-4071-87c1-fb9386f7114e","Type":"ContainerDied","Data":"104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757"} Nov 25 23:02:45 crc kubenswrapper[5045]: I1125 23:02:45.813101 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.767443 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.768016 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.775893 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzn6m" event={"ID":"00bdfdd4-092b-4071-87c1-fb9386f7114e","Type":"ContainerStarted","Data":"5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27"} Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.777512 5045 generic.go:334] "Generic (PLEG): container finished" podID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerID="8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be" exitCode=0 Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.777591 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v82tk" event={"ID":"ca230894-91a9-4e27-8fbf-a23be4cdade4","Type":"ContainerDied","Data":"8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be"} Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.791633 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtczn" event={"ID":"ac4a3911-99ba-425c-ae87-a4bc36dfb406","Type":"ContainerStarted","Data":"cb44da52a4d9b8bcb063e59e8c893ab5a9c0f42b868282ee341415f7fa00d137"} Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.796850 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wzn6m" podStartSLOduration=2.73106224 podStartE2EDuration="1m2.796829037s" podCreationTimestamp="2025-11-25 23:01:44 +0000 UTC" firstStartedPulling="2025-11-25 23:01:46.212646886 +0000 UTC m=+162.570305998" lastFinishedPulling="2025-11-25 23:02:46.278413683 +0000 UTC m=+222.636072795" observedRunningTime="2025-11-25 23:02:46.795366761 +0000 UTC m=+223.153025873" watchObservedRunningTime="2025-11-25 23:02:46.796829037 +0000 UTC m=+223.154488169" Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.823550 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.837908 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wtczn" podStartSLOduration=2.834385249 podStartE2EDuration="1m2.837888732s" podCreationTimestamp="2025-11-25 23:01:44 +0000 UTC" firstStartedPulling="2025-11-25 23:01:46.20364526 +0000 UTC m=+162.561304372" lastFinishedPulling="2025-11-25 23:02:46.207148743 +0000 UTC m=+222.564807855" observedRunningTime="2025-11-25 23:02:46.834064062 +0000 UTC m=+223.191723174" watchObservedRunningTime="2025-11-25 23:02:46.837888732 +0000 UTC m=+223.195547844" Nov 25 23:02:46 crc kubenswrapper[5045]: I1125 23:02:46.872669 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-r4w4p" Nov 25 23:02:47 crc kubenswrapper[5045]: I1125 23:02:47.050261 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9srj4"] Nov 25 23:02:47 crc kubenswrapper[5045]: I1125 23:02:47.796790 5045 generic.go:334] "Generic (PLEG): container finished" podID="571187eb-51e5-40d8-83b3-2295535de7e6" containerID="b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac" exitCode=0 Nov 25 23:02:47 crc kubenswrapper[5045]: I1125 23:02:47.796827 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h2rl5" 
event={"ID":"571187eb-51e5-40d8-83b3-2295535de7e6","Type":"ContainerDied","Data":"b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac"} Nov 25 23:02:47 crc kubenswrapper[5045]: I1125 23:02:47.800223 5045 generic.go:334] "Generic (PLEG): container finished" podID="cc573acb-eee1-4849-967f-fd1b253b640f" containerID="bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5" exitCode=0 Nov 25 23:02:47 crc kubenswrapper[5045]: I1125 23:02:47.800290 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cntjg" event={"ID":"cc573acb-eee1-4849-967f-fd1b253b640f","Type":"ContainerDied","Data":"bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5"} Nov 25 23:02:47 crc kubenswrapper[5045]: I1125 23:02:47.804263 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v82tk" event={"ID":"ca230894-91a9-4e27-8fbf-a23be4cdade4","Type":"ContainerStarted","Data":"a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602"} Nov 25 23:02:47 crc kubenswrapper[5045]: I1125 23:02:47.804524 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9srj4" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerName="registry-server" containerID="cri-o://fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f" gracePeriod=2 Nov 25 23:02:47 crc kubenswrapper[5045]: I1125 23:02:47.839058 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v82tk" podStartSLOduration=2.808434576 podStartE2EDuration="1m0.839041155s" podCreationTimestamp="2025-11-25 23:01:47 +0000 UTC" firstStartedPulling="2025-11-25 23:01:49.307795387 +0000 UTC m=+165.665454499" lastFinishedPulling="2025-11-25 23:02:47.338401966 +0000 UTC m=+223.696061078" observedRunningTime="2025-11-25 23:02:47.836155235 +0000 UTC m=+224.193814347" watchObservedRunningTime="2025-11-25 23:02:47.839041155 +0000 UTC m=+224.196700267" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.152606 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.152886 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.183203 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.286472 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-utilities\") pod \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.286825 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-catalog-content\") pod \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.287029 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkkmn\" (UniqueName: \"kubernetes.io/projected/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-kube-api-access-kkkmn\") pod \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\" (UID: \"d88d2d59-1cfc-48e3-99c0-fb45dca3f290\") " Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.287347 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-utilities" (OuterVolumeSpecName: "utilities") pod "d88d2d59-1cfc-48e3-99c0-fb45dca3f290" (UID: "d88d2d59-1cfc-48e3-99c0-fb45dca3f290"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.296958 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-kube-api-access-kkkmn" (OuterVolumeSpecName: "kube-api-access-kkkmn") pod "d88d2d59-1cfc-48e3-99c0-fb45dca3f290" (UID: "d88d2d59-1cfc-48e3-99c0-fb45dca3f290"). InnerVolumeSpecName "kube-api-access-kkkmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.359959 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d88d2d59-1cfc-48e3-99c0-fb45dca3f290" (UID: "d88d2d59-1cfc-48e3-99c0-fb45dca3f290"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.387792 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkkmn\" (UniqueName: \"kubernetes.io/projected/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-kube-api-access-kkkmn\") on node \"crc\" DevicePath \"\"" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.387819 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.387829 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d88d2d59-1cfc-48e3-99c0-fb45dca3f290-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.819243 5045 generic.go:334] "Generic (PLEG): container finished" podID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerID="fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f" exitCode=0 Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.819294 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9srj4" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.819313 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9srj4" event={"ID":"d88d2d59-1cfc-48e3-99c0-fb45dca3f290","Type":"ContainerDied","Data":"fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f"} Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.819671 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9srj4" event={"ID":"d88d2d59-1cfc-48e3-99c0-fb45dca3f290","Type":"ContainerDied","Data":"466145bd76b1b5a2cc8b5b85e4407759289435ba601ac11bf95304e051e5cba2"} Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.819692 5045 scope.go:117] "RemoveContainer" containerID="fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.823909 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h2rl5" event={"ID":"571187eb-51e5-40d8-83b3-2295535de7e6","Type":"ContainerStarted","Data":"2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf"} Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.830543 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cntjg" event={"ID":"cc573acb-eee1-4849-967f-fd1b253b640f","Type":"ContainerStarted","Data":"edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5"} Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.845202 5045 scope.go:117] "RemoveContainer" containerID="ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.851195 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9srj4"] Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.857267 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9srj4"] Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.862612 5045 scope.go:117] "RemoveContainer" containerID="eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49" Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.867057 
Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.886859 5045 scope.go:117] "RemoveContainer" containerID="fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f"
Nov 25 23:02:48 crc kubenswrapper[5045]: E1125 23:02:48.887262 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f\": container with ID starting with fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f not found: ID does not exist" containerID="fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f"
Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.887289 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f"} err="failed to get container status \"fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f\": rpc error: code = NotFound desc = could not find container \"fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f\": container with ID starting with fe9db755c4c44b013577439cc63e44330f3f8a5c60bd1f53fea7df490e38475f not found: ID does not exist"
Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.887308 5045 scope.go:117] "RemoveContainer" containerID="ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65"
Nov 25 23:02:48 crc kubenswrapper[5045]: E1125 23:02:48.887637 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65\": container with ID starting with ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65 not found: ID does not exist" containerID="ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65"
Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.887668 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65"} err="failed to get container status \"ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65\": rpc error: code = NotFound desc = could not find container \"ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65\": container with ID starting with ca4d562fdf1512b9ef61b82ddabfe50d36d7fe662037e42c43e2886c268b8c65 not found: ID does not exist"
Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.887694 5045 scope.go:117] "RemoveContainer" containerID="eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49"
Nov 25 23:02:48 crc kubenswrapper[5045]: E1125 23:02:48.888068 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49\": container with ID starting with eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49 not found: ID does not exist" containerID="eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49"
Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.888094 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49"} err="failed to get container status \"eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49\": rpc error: code = NotFound desc = could not find container \"eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49\": container with ID starting with eca8dd3c5caf92a706cb07609b1f3e07ee61c08d5f6e9b7dde339ac3bc33bf49 not found: ID does not exist"
Nov 25 23:02:48 crc kubenswrapper[5045]: I1125 23:02:48.889287 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cntjg" podStartSLOduration=3.039991662 podStartE2EDuration="1m1.889277674s" podCreationTimestamp="2025-11-25 23:01:47 +0000 UTC" firstStartedPulling="2025-11-25 23:01:49.356937175 +0000 UTC m=+165.714596287" lastFinishedPulling="2025-11-25 23:02:48.206223187 +0000 UTC m=+224.563882299" observedRunningTime="2025-11-25 23:02:48.885880108 +0000 UTC m=+225.243539220" watchObservedRunningTime="2025-11-25 23:02:48.889277674 +0000 UTC m=+225.246936786"
Nov 25 23:02:49 crc kubenswrapper[5045]: I1125 23:02:49.208719 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-v82tk" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="registry-server" probeResult="failure" output=<
Nov 25 23:02:49 crc kubenswrapper[5045]: 	timeout: failed to connect service ":50051" within 1s
Nov 25 23:02:49 crc kubenswrapper[5045]: >
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.403945 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" path="/var/lib/kubelet/pods/d88d2d59-1cfc-48e3-99c0-fb45dca3f290/volumes"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.439186 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4w4p"]
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.439392 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-r4w4p" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerName="registry-server" containerID="cri-o://c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb" gracePeriod=2
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.752332 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r4w4p"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.821646 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-utilities\") pod \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") "
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.822444 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-utilities" (OuterVolumeSpecName: "utilities") pod "28263e0a-9ac5-487e-9c95-8c2932ddcf74" (UID: "28263e0a-9ac5-487e-9c95-8c2932ddcf74"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.822559 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-catalog-content\") pod \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") "
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.829894 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rlmd\" (UniqueName: \"kubernetes.io/projected/28263e0a-9ac5-487e-9c95-8c2932ddcf74-kube-api-access-6rlmd\") pod \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\" (UID: \"28263e0a-9ac5-487e-9c95-8c2932ddcf74\") "
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.830287 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.835597 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28263e0a-9ac5-487e-9c95-8c2932ddcf74-kube-api-access-6rlmd" (OuterVolumeSpecName: "kube-api-access-6rlmd") pod "28263e0a-9ac5-487e-9c95-8c2932ddcf74" (UID: "28263e0a-9ac5-487e-9c95-8c2932ddcf74"). InnerVolumeSpecName "kube-api-access-6rlmd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.853782 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28263e0a-9ac5-487e-9c95-8c2932ddcf74" (UID: "28263e0a-9ac5-487e-9c95-8c2932ddcf74"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.854969 5045 generic.go:334] "Generic (PLEG): container finished" podID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerID="c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb" exitCode=0
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.855006 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4w4p" event={"ID":"28263e0a-9ac5-487e-9c95-8c2932ddcf74","Type":"ContainerDied","Data":"c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb"}
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.855033 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4w4p" event={"ID":"28263e0a-9ac5-487e-9c95-8c2932ddcf74","Type":"ContainerDied","Data":"7998ffe80f2dc3c0ea98b4f9bbcd7f66a31ccb0cbc704f7063c0ccd2feca3938"}
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.855048 5045 scope.go:117] "RemoveContainer" containerID="c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.855143 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r4w4p"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.881362 5045 scope.go:117] "RemoveContainer" containerID="3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.882583 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4w4p"]
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.884565 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4w4p"]
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.895973 5045 scope.go:117] "RemoveContainer" containerID="6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.912902 5045 scope.go:117] "RemoveContainer" containerID="c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb"
Nov 25 23:02:50 crc kubenswrapper[5045]: E1125 23:02:50.913400 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb\": container with ID starting with c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb not found: ID does not exist" containerID="c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.913438 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb"} err="failed to get container status \"c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb\": rpc error: code = NotFound desc = could not find container \"c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb\": container with ID starting with c63074a0b61de80e034a6e1a599bc508409b7d3490f7c195139ec646c20c6bdb not found: ID does not exist"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.913465 5045 scope.go:117] "RemoveContainer" containerID="3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f"
Nov 25 23:02:50 crc kubenswrapper[5045]: E1125 23:02:50.913746 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f\": container with ID starting with 3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f not found: ID does not exist" containerID="3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.913778 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f"} err="failed to get container status \"3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f\": rpc error: code = NotFound desc = could not find container \"3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f\": container with ID starting with 3cb03c96623ef86aa0b8e0fc56d5ea9257b4ef0d3f28ba9f1b7b69e23fb5313f not found: ID does not exist"
Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.913796 5045 scope.go:117] "RemoveContainer" containerID="6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe"
Nov 25 23:02:50 crc kubenswrapper[5045]: E1125 23:02:50.914132 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe\": container with ID starting with 6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe not found: ID does not exist" containerID="6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe"
failed" err="rpc error: code = NotFound desc = could not find container \"6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe\": container with ID starting with 6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe not found: ID does not exist" containerID="6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe" Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.914165 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe"} err="failed to get container status \"6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe\": rpc error: code = NotFound desc = could not find container \"6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe\": container with ID starting with 6830e2e9ef99019a27b696a24a3e3e36789c3ea87003a23abbcaceefc510dffe not found: ID does not exist" Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.931698 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rlmd\" (UniqueName: \"kubernetes.io/projected/28263e0a-9ac5-487e-9c95-8c2932ddcf74-kube-api-access-6rlmd\") on node \"crc\" DevicePath \"\"" Nov 25 23:02:50 crc kubenswrapper[5045]: I1125 23:02:50.931738 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28263e0a-9ac5-487e-9c95-8c2932ddcf74-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:02:52 crc kubenswrapper[5045]: I1125 23:02:52.402900 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" path="/var/lib/kubelet/pods/28263e0a-9ac5-487e-9c95-8c2932ddcf74/volumes" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.351577 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.351982 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.427769 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.590025 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.770629 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.770745 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.839295 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.919004 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:02:54 crc kubenswrapper[5045]: I1125 23:02:54.937085 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:02:56 crc kubenswrapper[5045]: I1125 23:02:56.037500 5045 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/community-operators-wtczn"] Nov 25 23:02:56 crc kubenswrapper[5045]: I1125 23:02:56.342913 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:02:56 crc kubenswrapper[5045]: I1125 23:02:56.342997 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:02:56 crc kubenswrapper[5045]: I1125 23:02:56.386393 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:02:56 crc kubenswrapper[5045]: I1125 23:02:56.959452 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:02:57 crc kubenswrapper[5045]: I1125 23:02:57.796762 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:02:57 crc kubenswrapper[5045]: I1125 23:02:57.798200 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:02:57 crc kubenswrapper[5045]: I1125 23:02:57.883759 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:02:57 crc kubenswrapper[5045]: I1125 23:02:57.909158 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wtczn" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerName="registry-server" containerID="cri-o://cb44da52a4d9b8bcb063e59e8c893ab5a9c0f42b868282ee341415f7fa00d137" gracePeriod=2 Nov 25 23:02:57 crc kubenswrapper[5045]: I1125 23:02:57.956548 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:02:58 crc kubenswrapper[5045]: I1125 23:02:58.210372 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:02:58 crc kubenswrapper[5045]: I1125 23:02:58.268467 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v82tk" Nov 25 23:02:58 crc kubenswrapper[5045]: I1125 23:02:58.917799 5045 generic.go:334] "Generic (PLEG): container finished" podID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerID="cb44da52a4d9b8bcb063e59e8c893ab5a9c0f42b868282ee341415f7fa00d137" exitCode=0 Nov 25 23:02:58 crc kubenswrapper[5045]: I1125 23:02:58.917860 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtczn" event={"ID":"ac4a3911-99ba-425c-ae87-a4bc36dfb406","Type":"ContainerDied","Data":"cb44da52a4d9b8bcb063e59e8c893ab5a9c0f42b868282ee341415f7fa00d137"} Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.623865 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.665253 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-catalog-content\") pod \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.665301 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-utilities\") pod \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.665503 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pf7ns\" (UniqueName: \"kubernetes.io/projected/ac4a3911-99ba-425c-ae87-a4bc36dfb406-kube-api-access-pf7ns\") pod \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\" (UID: \"ac4a3911-99ba-425c-ae87-a4bc36dfb406\") " Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.666899 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-utilities" (OuterVolumeSpecName: "utilities") pod "ac4a3911-99ba-425c-ae87-a4bc36dfb406" (UID: "ac4a3911-99ba-425c-ae87-a4bc36dfb406"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.674916 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac4a3911-99ba-425c-ae87-a4bc36dfb406-kube-api-access-pf7ns" (OuterVolumeSpecName: "kube-api-access-pf7ns") pod "ac4a3911-99ba-425c-ae87-a4bc36dfb406" (UID: "ac4a3911-99ba-425c-ae87-a4bc36dfb406"). InnerVolumeSpecName "kube-api-access-pf7ns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.733020 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ac4a3911-99ba-425c-ae87-a4bc36dfb406" (UID: "ac4a3911-99ba-425c-ae87-a4bc36dfb406"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.766449 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pf7ns\" (UniqueName: \"kubernetes.io/projected/ac4a3911-99ba-425c-ae87-a4bc36dfb406-kube-api-access-pf7ns\") on node \"crc\" DevicePath \"\"" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.766489 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.766505 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac4a3911-99ba-425c-ae87-a4bc36dfb406-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.928466 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtczn" event={"ID":"ac4a3911-99ba-425c-ae87-a4bc36dfb406","Type":"ContainerDied","Data":"91d30879c720e4f02d48b325907af8a6fc3fa722959eae1e51908f79f39f3020"} Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.928564 5045 scope.go:117] "RemoveContainer" containerID="cb44da52a4d9b8bcb063e59e8c893ab5a9c0f42b868282ee341415f7fa00d137" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.928567 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wtczn" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.954444 5045 scope.go:117] "RemoveContainer" containerID="9b7f42cb336590a08a3e5eeed49381a796f888c0fe11336756181a215932f9b4" Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.974374 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wtczn"] Nov 25 23:02:59 crc kubenswrapper[5045]: I1125 23:02:59.979855 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wtczn"] Nov 25 23:03:00 crc kubenswrapper[5045]: I1125 23:03:00.005047 5045 scope.go:117] "RemoveContainer" containerID="5bbd4eaf1ef42f40b027385cd063b932f664d7abb6c6e19b8a5f9165e7e74077" Nov 25 23:03:00 crc kubenswrapper[5045]: I1125 23:03:00.410425 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" path="/var/lib/kubelet/pods/ac4a3911-99ba-425c-ae87-a4bc36dfb406/volumes" Nov 25 23:03:00 crc kubenswrapper[5045]: I1125 23:03:00.642542 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v82tk"] Nov 25 23:03:00 crc kubenswrapper[5045]: I1125 23:03:00.643019 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v82tk" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="registry-server" containerID="cri-o://a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602" gracePeriod=2 Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.747395 5045 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.904635 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cld2\" (UniqueName: \"kubernetes.io/projected/ca230894-91a9-4e27-8fbf-a23be4cdade4-kube-api-access-6cld2\") pod \"ca230894-91a9-4e27-8fbf-a23be4cdade4\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") "
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.904845 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-catalog-content\") pod \"ca230894-91a9-4e27-8fbf-a23be4cdade4\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") "
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.905960 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-utilities\") pod \"ca230894-91a9-4e27-8fbf-a23be4cdade4\" (UID: \"ca230894-91a9-4e27-8fbf-a23be4cdade4\") "
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.907469 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-utilities" (OuterVolumeSpecName: "utilities") pod "ca230894-91a9-4e27-8fbf-a23be4cdade4" (UID: "ca230894-91a9-4e27-8fbf-a23be4cdade4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.910023 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca230894-91a9-4e27-8fbf-a23be4cdade4-kube-api-access-6cld2" (OuterVolumeSpecName: "kube-api-access-6cld2") pod "ca230894-91a9-4e27-8fbf-a23be4cdade4" (UID: "ca230894-91a9-4e27-8fbf-a23be4cdade4"). InnerVolumeSpecName "kube-api-access-6cld2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.945971 5045 generic.go:334] "Generic (PLEG): container finished" podID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerID="a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602" exitCode=0
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.946046 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v82tk" event={"ID":"ca230894-91a9-4e27-8fbf-a23be4cdade4","Type":"ContainerDied","Data":"a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602"}
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.946051 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v82tk"
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.946133 5045 scope.go:117] "RemoveContainer" containerID="a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602"
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.946112 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v82tk" event={"ID":"ca230894-91a9-4e27-8fbf-a23be4cdade4","Type":"ContainerDied","Data":"066d7db43790ee9b649d1cd1efbf21545b7ac03ac71e07f31c79319b09298204"}
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.971172 5045 scope.go:117] "RemoveContainer" containerID="8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be"
Nov 25 23:03:01 crc kubenswrapper[5045]: I1125 23:03:01.992128 5045 scope.go:117] "RemoveContainer" containerID="6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe"
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.008268 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.008309 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cld2\" (UniqueName: \"kubernetes.io/projected/ca230894-91a9-4e27-8fbf-a23be4cdade4-kube-api-access-6cld2\") on node \"crc\" DevicePath \"\""
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.019678 5045 scope.go:117] "RemoveContainer" containerID="a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602"
Nov 25 23:03:02 crc kubenswrapper[5045]: E1125 23:03:02.020546 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602\": container with ID starting with a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602 not found: ID does not exist" containerID="a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602"
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.020616 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602"} err="failed to get container status \"a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602\": rpc error: code = NotFound desc = could not find container \"a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602\": container with ID starting with a7f75ca1473aa5d4880b0527a7857cace3001721d7e1cf470be0859b08d7f602 not found: ID does not exist"
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.020669 5045 scope.go:117] "RemoveContainer" containerID="8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be"
Nov 25 23:03:02 crc kubenswrapper[5045]: E1125 23:03:02.021826 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be\": container with ID starting with 8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be not found: ID does not exist" containerID="8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be"
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.021877 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be"} err="failed to get container status \"8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be\": rpc error: code = NotFound desc = could not find container \"8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be\": container with ID starting with 8209766e26277d35c339c7fd028f43304a9aeb42e25480c1ee0f8278f634b9be not found: ID does not exist"
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.021920 5045 scope.go:117] "RemoveContainer" containerID="6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe"
Nov 25 23:03:02 crc kubenswrapper[5045]: E1125 23:03:02.022387 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe\": container with ID starting with 6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe not found: ID does not exist" containerID="6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe"
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.022444 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe"} err="failed to get container status \"6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe\": rpc error: code = NotFound desc = could not find container \"6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe\": container with ID starting with 6148c820bd54d7e65c6b852ef60bdad1e489928ce9ee27ca10f30cf7f38ac1fe not found: ID does not exist"
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.032569 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ca230894-91a9-4e27-8fbf-a23be4cdade4" (UID: "ca230894-91a9-4e27-8fbf-a23be4cdade4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.109958 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca230894-91a9-4e27-8fbf-a23be4cdade4-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.284838 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v82tk"]
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.289820 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v82tk"]
Nov 25 23:03:02 crc kubenswrapper[5045]: I1125 23:03:02.406243 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" path="/var/lib/kubelet/pods/ca230894-91a9-4e27-8fbf-a23be4cdade4/volumes"
Nov 25 23:03:05 crc kubenswrapper[5045]: I1125 23:03:05.925445 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2btnr"]
Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.374068 5045 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.375118 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879" gracePeriod=15
Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.375221 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b" gracePeriod=15
Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.375151 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478" gracePeriod=15
Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.375257 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161" gracePeriod=15
Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.375227 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8" gracePeriod=15
Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.376853 5045 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377156 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 23:03:15
crc kubenswrapper[5045]: I1125 23:03:15.377175 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377194 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377207 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377234 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377252 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377275 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377291 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377311 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377323 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377344 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerName="extract-utilities" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377356 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerName="extract-utilities" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377371 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377382 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377397 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerName="extract-utilities" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377409 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerName="extract-utilities" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377422 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="extract-utilities" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377434 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="extract-utilities" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377450 5045 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerName="extract-content" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377463 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerName="extract-content" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377481 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerName="extract-content" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377494 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerName="extract-content" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377510 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="extract-content" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377523 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="extract-content" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377540 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377552 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377570 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377582 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377596 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377607 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377624 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377637 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377656 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerName="extract-content" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377668 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerName="extract-content" Nov 25 23:03:15 crc kubenswrapper[5045]: E1125 23:03:15.377685 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerName="extract-utilities" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377698 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerName="extract-utilities" Nov 25 23:03:15 crc kubenswrapper[5045]: 
E1125 23:03:15.377751 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377768 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377954 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377975 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.377989 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.378009 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca230894-91a9-4e27-8fbf-a23be4cdade4" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.378030 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.378045 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.378060 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac4a3911-99ba-425c-ae87-a4bc36dfb406" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.378073 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d88d2d59-1cfc-48e3-99c0-fb45dca3f290" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.378092 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="28263e0a-9ac5-487e-9c95-8c2932ddcf74" containerName="registry-server" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.378393 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.381341 5045 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.386095 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.392848 5045 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.499424 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.500191 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.500350 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.500397 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.500432 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.500463 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.500508 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.500547 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.603981 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604189 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604319 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604325 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604370 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604404 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604436 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604437 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604481 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc 
kubenswrapper[5045]: I1125 23:03:15.604514 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604549 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604559 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604588 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604519 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604759 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:15 crc kubenswrapper[5045]: I1125 23:03:15.604810 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.037599 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.039627 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.040789 5045 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478" exitCode=0 Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.040856 5045 generic.go:334] "Generic (PLEG): container finished" 
podID="f4b27818a5e8e43d0dc095d08835c792" containerID="99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b" exitCode=0 Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.040880 5045 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8" exitCode=0 Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.040929 5045 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161" exitCode=2 Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.040927 5045 scope.go:117] "RemoveContainer" containerID="384dc34698de11021b7de8d3dd765dcb48133744296ca5122f259e89d28c46a1" Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.043677 5045 generic.go:334] "Generic (PLEG): container finished" podID="90af59c5-19b9-410b-9913-c678750e67c0" containerID="a8f7f3b7bb6a57a95937db6730726761e2464a72752de80a7545e932e09093c0" exitCode=0 Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.043763 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"90af59c5-19b9-410b-9913-c678750e67c0","Type":"ContainerDied","Data":"a8f7f3b7bb6a57a95937db6730726761e2464a72752de80a7545e932e09093c0"} Nov 25 23:03:16 crc kubenswrapper[5045]: I1125 23:03:16.044808 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.055214 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.422964 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.423530 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.536151 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-kubelet-dir\") pod \"90af59c5-19b9-410b-9913-c678750e67c0\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.536618 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-var-lock\") pod \"90af59c5-19b9-410b-9913-c678750e67c0\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.536661 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/90af59c5-19b9-410b-9913-c678750e67c0-kube-api-access\") pod \"90af59c5-19b9-410b-9913-c678750e67c0\" (UID: \"90af59c5-19b9-410b-9913-c678750e67c0\") " Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.536289 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "90af59c5-19b9-410b-9913-c678750e67c0" (UID: "90af59c5-19b9-410b-9913-c678750e67c0"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.537858 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-var-lock" (OuterVolumeSpecName: "var-lock") pod "90af59c5-19b9-410b-9913-c678750e67c0" (UID: "90af59c5-19b9-410b-9913-c678750e67c0"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.545135 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90af59c5-19b9-410b-9913-c678750e67c0-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "90af59c5-19b9-410b-9913-c678750e67c0" (UID: "90af59c5-19b9-410b-9913-c678750e67c0"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.637591 5045 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.637622 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/90af59c5-19b9-410b-9913-c678750e67c0-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.637632 5045 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/90af59c5-19b9-410b-9913-c678750e67c0-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.778267 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.779611 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.780247 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.780830 5045 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.940332 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.940463 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.940708 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.940833 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.940871 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.941025 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.941316 5045 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.941343 5045 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:17 crc kubenswrapper[5045]: I1125 23:03:17.941360 5045 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.069823 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.071233 5045 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879" exitCode=0 Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.071364 5045 scope.go:117] "RemoveContainer" containerID="555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.071377 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.076160 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"90af59c5-19b9-410b-9913-c678750e67c0","Type":"ContainerDied","Data":"00aad0f3d96f1f5c06f05ef99b38801c6e2de8613da6f63392a529995f8deb83"} Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.076198 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00aad0f3d96f1f5c06f05ef99b38801c6e2de8613da6f63392a529995f8deb83" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.076223 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.102367 5045 scope.go:117] "RemoveContainer" containerID="99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.108398 5045 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.109118 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.109829 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.110386 5045 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.129884 5045 scope.go:117] "RemoveContainer" containerID="d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.149073 5045 scope.go:117] "RemoveContainer" containerID="7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.167060 5045 scope.go:117] "RemoveContainer" containerID="6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.184052 5045 scope.go:117] "RemoveContainer" containerID="91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.207544 5045 scope.go:117] "RemoveContainer" containerID="555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478" Nov 25 23:03:18 crc kubenswrapper[5045]: E1125 23:03:18.208309 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code 
= NotFound desc = could not find container \"555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\": container with ID starting with 555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478 not found: ID does not exist" containerID="555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.208397 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478"} err="failed to get container status \"555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\": rpc error: code = NotFound desc = could not find container \"555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478\": container with ID starting with 555794e46d9c59b89f87a4be35f1a0b10de562ddb2500cfc2541345854973478 not found: ID does not exist" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.208545 5045 scope.go:117] "RemoveContainer" containerID="99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b" Nov 25 23:03:18 crc kubenswrapper[5045]: E1125 23:03:18.209136 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\": container with ID starting with 99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b not found: ID does not exist" containerID="99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.209176 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b"} err="failed to get container status \"99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\": rpc error: code = NotFound desc = could not find container \"99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b\": container with ID starting with 99a0bc0ce252bd227c3e88082fe16591c203101ee7ef58c8ccec47eff16a1a4b not found: ID does not exist" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.209202 5045 scope.go:117] "RemoveContainer" containerID="d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8" Nov 25 23:03:18 crc kubenswrapper[5045]: E1125 23:03:18.209779 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\": container with ID starting with d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8 not found: ID does not exist" containerID="d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.209840 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8"} err="failed to get container status \"d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\": rpc error: code = NotFound desc = could not find container \"d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8\": container with ID starting with d9818da2e85877275b5135e7de4489129f6b44efb5cfdf785859f402e0d9a3f8 not found: ID does not exist" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.209881 5045 scope.go:117] "RemoveContainer" 
containerID="7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161" Nov 25 23:03:18 crc kubenswrapper[5045]: E1125 23:03:18.211579 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\": container with ID starting with 7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161 not found: ID does not exist" containerID="7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.211744 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161"} err="failed to get container status \"7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\": rpc error: code = NotFound desc = could not find container \"7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161\": container with ID starting with 7fd86c27ff5f6925fa7b682b7f93fa74d7eeb063e8ad7ff2ce0853c4af33a161 not found: ID does not exist" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.211867 5045 scope.go:117] "RemoveContainer" containerID="6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879" Nov 25 23:03:18 crc kubenswrapper[5045]: E1125 23:03:18.212409 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\": container with ID starting with 6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879 not found: ID does not exist" containerID="6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.212440 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879"} err="failed to get container status \"6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\": rpc error: code = NotFound desc = could not find container \"6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879\": container with ID starting with 6e1e62d79f78d4a8711d7a6b9a0f1b7ba327f69627207dcd6f9aa50661388879 not found: ID does not exist" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.212458 5045 scope.go:117] "RemoveContainer" containerID="91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf" Nov 25 23:03:18 crc kubenswrapper[5045]: E1125 23:03:18.212790 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\": container with ID starting with 91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf not found: ID does not exist" containerID="91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.212852 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf"} err="failed to get container status \"91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\": rpc error: code = NotFound desc = could not find container \"91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf\": container with ID starting with 
91834a96f994c4b88a5e2c7cd6dc92b69075d684b871585353e13024a29e08bf not found: ID does not exist" Nov 25 23:03:18 crc kubenswrapper[5045]: I1125 23:03:18.407537 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 23:03:20 crc kubenswrapper[5045]: E1125 23:03:20.439265 5045 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.89:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:20 crc kubenswrapper[5045]: I1125 23:03:20.440232 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:20 crc kubenswrapper[5045]: E1125 23:03:20.479814 5045 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.89:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b6255fb9e8406 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 23:03:20.478893062 +0000 UTC m=+256.836552174,LastTimestamp:2025-11-25 23:03:20.478893062 +0000 UTC m=+256.836552174,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 23:03:21 crc kubenswrapper[5045]: I1125 23:03:21.096855 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"0388346ba3a0886e8ce2c3a1d85576186ccc8b67a614b2c5c05f7409b9833234"} Nov 25 23:03:21 crc kubenswrapper[5045]: I1125 23:03:21.097226 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"5cb7b7e541d53592354fbc9891e962d948639722ac9b5065a40b8a78fe5a46b8"} Nov 25 23:03:21 crc kubenswrapper[5045]: I1125 23:03:21.097955 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:21 crc kubenswrapper[5045]: E1125 23:03:21.098120 5045 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.89:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:03:24 crc kubenswrapper[5045]: E1125 23:03:24.201969 5045 
event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.89:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b6255fb9e8406 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 23:03:20.478893062 +0000 UTC m=+256.836552174,LastTimestamp:2025-11-25 23:03:20.478893062 +0000 UTC m=+256.836552174,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 23:03:24 crc kubenswrapper[5045]: I1125 23:03:24.401369 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:24 crc kubenswrapper[5045]: E1125 23:03:24.655644 5045 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:24 crc kubenswrapper[5045]: E1125 23:03:24.656333 5045 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:24 crc kubenswrapper[5045]: E1125 23:03:24.656542 5045 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:24 crc kubenswrapper[5045]: E1125 23:03:24.656697 5045 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:24 crc kubenswrapper[5045]: E1125 23:03:24.656865 5045 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" Nov 25 23:03:24 crc kubenswrapper[5045]: I1125 23:03:24.656887 5045 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 23:03:24 crc kubenswrapper[5045]: E1125 23:03:24.657051 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="200ms" Nov 25 23:03:24 crc 
kubenswrapper[5045]: E1125 23:03:24.857934 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="400ms"
Nov 25 23:03:25 crc kubenswrapper[5045]: E1125 23:03:25.259546 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="800ms"
Nov 25 23:03:26 crc kubenswrapper[5045]: E1125 23:03:26.061252 5045 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.89:6443: connect: connection refused" interval="1.6s"
Nov 25 23:03:26 crc kubenswrapper[5045]: I1125 23:03:26.396476 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 23:03:26 crc kubenswrapper[5045]: I1125 23:03:26.397386 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused"
Nov 25 23:03:26 crc kubenswrapper[5045]: I1125 23:03:26.422614 5045 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2"
Nov 25 23:03:26 crc kubenswrapper[5045]: I1125 23:03:26.422669 5045 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2"
Nov 25 23:03:26 crc kubenswrapper[5045]: E1125 23:03:26.423341 5045 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 23:03:26 crc kubenswrapper[5045]: I1125 23:03:26.424087 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 23:03:27 crc kubenswrapper[5045]: I1125 23:03:27.141765 5045 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="70970572d0f5d80d5419932628768ddbfff2fead3e73fd3d1c2ac73ac2bb4883" exitCode=0
Nov 25 23:03:27 crc kubenswrapper[5045]: I1125 23:03:27.141896 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"70970572d0f5d80d5419932628768ddbfff2fead3e73fd3d1c2ac73ac2bb4883"}
Nov 25 23:03:27 crc kubenswrapper[5045]: I1125 23:03:27.142144 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d1be3f4847354a8df89c5e088c033d15aa5f107a6630852fe1ad2374b86dbaa1"}
Nov 25 23:03:27 crc kubenswrapper[5045]: I1125 23:03:27.142541 5045 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2"
Nov 25 23:03:27 crc kubenswrapper[5045]: I1125 23:03:27.142559 5045 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2"
Nov 25 23:03:27 crc kubenswrapper[5045]: E1125 23:03:27.143013 5045 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.89:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 23:03:27 crc kubenswrapper[5045]: I1125 23:03:27.143035 5045 status_manager.go:851] "Failed to get status for pod" podUID="90af59c5-19b9-410b-9913-c678750e67c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.89:6443: connect: connection refused"
Nov 25 23:03:28 crc kubenswrapper[5045]: I1125 23:03:28.159094 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e25a089247ddb5d01915a8178ef280fbdc47a470a729abd0c4f2bd70a9ae0508"}
Nov 25 23:03:28 crc kubenswrapper[5045]: I1125 23:03:28.159584 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3acf38095b19963e606f6c7b1d41cc9bef9d590b00481af19f6fc8bcad1c0266"}
Nov 25 23:03:28 crc kubenswrapper[5045]: I1125 23:03:28.159600 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7c86e3de9612ea97f3c90df1a8a47dba72ada25eb54a7d5e436be9b838c1184b"}
Nov 25 23:03:29 crc kubenswrapper[5045]: I1125 23:03:29.168580 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"44b66715b688a130c669d2cf06a66d328369296fbeed17440cc6b4cb686820c8"}
Nov 25 23:03:29 crc kubenswrapper[5045]: I1125 23:03:29.168623 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"19a062e7692a60779a9fc9adefecd86adc485a19e1b7e5c8845c9bf859915f29"}
Nov 25 23:03:29 crc kubenswrapper[5045]: I1125 23:03:29.168867 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 23:03:29 crc kubenswrapper[5045]: I1125 23:03:29.168987 5045 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2"
Nov 25 23:03:29 crc kubenswrapper[5045]: I1125 23:03:29.169031 5045 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2"
Nov 25 23:03:30 crc kubenswrapper[5045]: I1125 23:03:30.952655 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" podUID="f848dbf6-817b-44d7-b410-7ac266166501" containerName="oauth-openshift" containerID="cri-o://64355fe8ea8dfd40e098d2a8cb939e370a89949e1bd02991894e2adf45135430" gracePeriod=15
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.184613 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.185020 5045 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db" exitCode=1
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.185102 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db"}
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.185787 5045 scope.go:117] "RemoveContainer" containerID="c129476fc11ea7dea5d1f90d98dc6688f0ae729de3ba6c7238fcfc6b84fc56db"
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.189680 5045 generic.go:334] "Generic (PLEG): container finished" podID="f848dbf6-817b-44d7-b410-7ac266166501" containerID="64355fe8ea8dfd40e098d2a8cb939e370a89949e1bd02991894e2adf45135430" exitCode=0
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.189774 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" event={"ID":"f848dbf6-817b-44d7-b410-7ac266166501","Type":"ContainerDied","Data":"64355fe8ea8dfd40e098d2a8cb939e370a89949e1bd02991894e2adf45135430"}
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.424400 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.425017 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.430785 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.479776 5045 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634322 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-trusted-ca-bundle\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634394 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-serving-cert\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634445 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-cliconfig\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634486 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldfmv\" (UniqueName: \"kubernetes.io/projected/f848dbf6-817b-44d7-b410-7ac266166501-kube-api-access-ldfmv\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634516 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-error\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634546 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-idp-0-file-data\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634580 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-session\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634627 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-ocp-branding-template\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634686 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-service-ca\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " 
Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634744 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-audit-policies\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634776 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-login\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634810 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f848dbf6-817b-44d7-b410-7ac266166501-audit-dir\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634860 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-router-certs\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.634893 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-provider-selection\") pod \"f848dbf6-817b-44d7-b410-7ac266166501\" (UID: \"f848dbf6-817b-44d7-b410-7ac266166501\") " Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.635930 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f848dbf6-817b-44d7-b410-7ac266166501-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.637006 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.637033 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.637055 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.639053 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.643925 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.648090 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f848dbf6-817b-44d7-b410-7ac266166501-kube-api-access-ldfmv" (OuterVolumeSpecName: "kube-api-access-ldfmv") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "kube-api-access-ldfmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.648442 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.649292 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.651585 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.651929 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.652558 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.652673 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.652989 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "f848dbf6-817b-44d7-b410-7ac266166501" (UID: "f848dbf6-817b-44d7-b410-7ac266166501"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.735976 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736325 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736349 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736368 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736390 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736408 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldfmv\" (UniqueName: \"kubernetes.io/projected/f848dbf6-817b-44d7-b410-7ac266166501-kube-api-access-ldfmv\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736426 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736444 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736464 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736481 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736499 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736517 5045 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/f848dbf6-817b-44d7-b410-7ac266166501-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736534 5045 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f848dbf6-817b-44d7-b410-7ac266166501-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:31 crc kubenswrapper[5045]: I1125 23:03:31.736552 5045 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f848dbf6-817b-44d7-b410-7ac266166501-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 23:03:32 crc kubenswrapper[5045]: I1125 23:03:32.208857 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 23:03:32 crc kubenswrapper[5045]: I1125 23:03:32.209121 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"6c154397d4b172a7f0e34dcf8eacfdb58fd5b5e8cf782fa11bfe3fa077dc905b"} Nov 25 23:03:32 crc kubenswrapper[5045]: I1125 23:03:32.212483 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" event={"ID":"f848dbf6-817b-44d7-b410-7ac266166501","Type":"ContainerDied","Data":"fb3faeffdf6f61d896fff2332668cec2af39d221d9b982eaa19c827b4c0ca3d9"} Nov 25 23:03:32 crc kubenswrapper[5045]: I1125 23:03:32.212548 5045 scope.go:117] "RemoveContainer" containerID="64355fe8ea8dfd40e098d2a8cb939e370a89949e1bd02991894e2adf45135430" Nov 25 23:03:32 crc kubenswrapper[5045]: I1125 23:03:32.212550 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2btnr" Nov 25 23:03:33 crc kubenswrapper[5045]: I1125 23:03:33.051692 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 23:03:33 crc kubenswrapper[5045]: I1125 23:03:33.837517 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 23:03:33 crc kubenswrapper[5045]: I1125 23:03:33.844117 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 23:03:34 crc kubenswrapper[5045]: I1125 23:03:34.187136 5045 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:34 crc kubenswrapper[5045]: I1125 23:03:34.228940 5045 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2" Nov 25 23:03:34 crc kubenswrapper[5045]: I1125 23:03:34.228976 5045 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2" Nov 25 23:03:34 crc kubenswrapper[5045]: I1125 23:03:34.234770 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:34 crc kubenswrapper[5045]: I1125 23:03:34.406796 5045 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="d82575f7-7872-49dc-b8ee-8c1c6bcbfe9a" Nov 25 23:03:35 crc kubenswrapper[5045]: I1125 23:03:35.235904 5045 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2" Nov 25 23:03:35 crc kubenswrapper[5045]: I1125 23:03:35.235939 5045 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2" Nov 25 23:03:35 crc kubenswrapper[5045]: I1125 23:03:35.239040 5045 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="d82575f7-7872-49dc-b8ee-8c1c6bcbfe9a" Nov 25 23:03:40 crc kubenswrapper[5045]: I1125 23:03:40.778477 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 23:03:40 crc kubenswrapper[5045]: I1125 23:03:40.794925 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 23:03:41 crc kubenswrapper[5045]: I1125 23:03:41.083432 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 23:03:41 crc kubenswrapper[5045]: I1125 23:03:41.804658 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 23:03:42 crc kubenswrapper[5045]: I1125 23:03:42.438316 5045 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 23:03:42 crc kubenswrapper[5045]: I1125 23:03:42.718346 5045 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 23:03:43 crc kubenswrapper[5045]: I1125 23:03:43.011092 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 23:03:43 crc kubenswrapper[5045]: I1125 23:03:43.056472 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 23:03:43 crc kubenswrapper[5045]: I1125 23:03:43.071438 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 23:03:44 crc kubenswrapper[5045]: I1125 23:03:44.234963 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 23:03:44 crc kubenswrapper[5045]: I1125 23:03:44.317424 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 23:03:44 crc kubenswrapper[5045]: I1125 23:03:44.606475 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 23:03:44 crc kubenswrapper[5045]: I1125 23:03:44.965604 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 23:03:44 crc kubenswrapper[5045]: I1125 23:03:44.975008 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 23:03:45 crc kubenswrapper[5045]: I1125 23:03:45.082509 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 23:03:45 crc kubenswrapper[5045]: I1125 23:03:45.338563 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 23:03:45 crc kubenswrapper[5045]: I1125 23:03:45.766190 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 23:03:45 crc kubenswrapper[5045]: I1125 23:03:45.957001 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 23:03:46 crc kubenswrapper[5045]: I1125 23:03:46.092177 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 23:03:46 crc kubenswrapper[5045]: I1125 23:03:46.352431 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 23:03:46 crc kubenswrapper[5045]: I1125 23:03:46.467294 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 23:03:46 crc kubenswrapper[5045]: I1125 23:03:46.602563 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 23:03:47 crc kubenswrapper[5045]: I1125 23:03:47.081937 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 23:03:47 crc kubenswrapper[5045]: I1125 23:03:47.663141 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 23:03:47 crc kubenswrapper[5045]: I1125 23:03:47.688602 5045 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 23:03:47 crc kubenswrapper[5045]: I1125 23:03:47.749746 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 23:03:47 crc kubenswrapper[5045]: I1125 23:03:47.859067 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 23:03:48 crc kubenswrapper[5045]: I1125 23:03:48.051530 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 23:03:48 crc kubenswrapper[5045]: I1125 23:03:48.291149 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 23:03:48 crc kubenswrapper[5045]: I1125 23:03:48.316520 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 23:03:48 crc kubenswrapper[5045]: I1125 23:03:48.617620 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 23:03:48 crc kubenswrapper[5045]: I1125 23:03:48.690208 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 23:03:48 crc kubenswrapper[5045]: I1125 23:03:48.719316 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 23:03:48 crc kubenswrapper[5045]: I1125 23:03:48.823881 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 23:03:48 crc kubenswrapper[5045]: I1125 23:03:48.836128 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.087920 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.147539 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.226538 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.237394 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.274241 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.380632 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.432535 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.637408 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.703328 5045 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.726745 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.931558 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 23:03:49 crc kubenswrapper[5045]: I1125 23:03:49.956702 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.016087 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.149615 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.297579 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.299574 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.345224 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.355820 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.413293 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.454162 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.662185 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.726894 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.737459 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.803461 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.804548 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.821165 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.866813 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.903505 5045 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.917492 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.926072 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 23:03:50 crc kubenswrapper[5045]: I1125 23:03:50.962434 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.126559 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.129645 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.147949 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.299409 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.441618 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.484159 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.584229 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.604748 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.703404 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 23:03:51 crc kubenswrapper[5045]: I1125 23:03:51.976541 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.022483 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.027795 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.028666 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.083027 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.263264 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 23:03:52 crc kubenswrapper[5045]: 
I1125 23:03:52.300317 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.397866 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.431094 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.465511 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.476413 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.479846 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.524159 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.559526 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.624426 5045 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.887389 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 23:03:52 crc kubenswrapper[5045]: I1125 23:03:52.948250 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.032202 5045 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.041425 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.074692 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.134412 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.168161 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.260551 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.350345 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.390570 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 
23:03:53.398308 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.402088 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.446354 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.480981 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.591533 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.591777 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.608837 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.751172 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.755241 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.809192 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.864997 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.898593 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.910452 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.925029 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 23:03:53 crc kubenswrapper[5045]: I1125 23:03:53.940472 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.005868 5045 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.194406 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.295775 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.315087 5045 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"image-import-ca" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.322625 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.469273 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.537038 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.862958 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.914370 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.915917 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.947679 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.976279 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 23:03:54 crc kubenswrapper[5045]: I1125 23:03:54.989155 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.074466 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.083340 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.240136 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.266186 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.363607 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.363863 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.376429 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.403666 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.451214 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 23:03:55 crc 
kubenswrapper[5045]: I1125 23:03:55.569388 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.620128 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.646858 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.656669 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.695138 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.695764 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.750874 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.781577 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.808564 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.858181 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.882961 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 23:03:55 crc kubenswrapper[5045]: I1125 23:03:55.995165 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.081433 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.119198 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.180076 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.246093 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.246544 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.367236 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.410985 5045 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.449286 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.490657 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.604539 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.755162 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.787316 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.788963 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.799987 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.819638 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.836029 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.869241 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.892491 5045 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.895751 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.898143 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2btnr","openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.898219 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-5477954dc8-cvshr"] Nov 25 23:03:56 crc kubenswrapper[5045]: E1125 23:03:56.898459 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90af59c5-19b9-410b-9913-c678750e67c0" containerName="installer" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.898488 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="90af59c5-19b9-410b-9913-c678750e67c0" containerName="installer" Nov 25 23:03:56 crc kubenswrapper[5045]: E1125 23:03:56.898528 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f848dbf6-817b-44d7-b410-7ac266166501" containerName="oauth-openshift" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.898541 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f848dbf6-817b-44d7-b410-7ac266166501" containerName="oauth-openshift" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.898694 5045 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="f848dbf6-817b-44d7-b410-7ac266166501" containerName="oauth-openshift" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.898756 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="90af59c5-19b9-410b-9913-c678750e67c0" containerName="installer" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.898838 5045 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.898890 5045 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="75d95a7a-3880-4be0-905e-86eace3106e2" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.899259 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.902025 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.902257 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.902567 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.904152 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.904655 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.904685 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.904749 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.904755 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.904760 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.904850 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.905880 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.910066 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.910378 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.914428 5045 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.917263 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.921754 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.943652 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 23:03:56 crc kubenswrapper[5045]: I1125 23:03:56.952561 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=22.952537381 podStartE2EDuration="22.952537381s" podCreationTimestamp="2025-11-25 23:03:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:03:56.951452878 +0000 UTC m=+293.309111990" watchObservedRunningTime="2025-11-25 23:03:56.952537381 +0000 UTC m=+293.310196503" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007467 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-222xz\" (UniqueName: \"kubernetes.io/projected/cff4bfea-ae50-4298-bfc4-f21a10231ef2-kube-api-access-222xz\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007518 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-router-certs\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007549 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-session\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007565 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007583 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 
23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007603 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-error\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007621 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-service-ca\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007681 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-login\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007699 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-audit-policies\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007745 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007809 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cff4bfea-ae50-4298-bfc4-f21a10231ef2-audit-dir\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007891 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.007961 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: 
\"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.008024 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.108839 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-login\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.108878 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-audit-policies\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.108914 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.108938 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cff4bfea-ae50-4298-bfc4-f21a10231ef2-audit-dir\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.108958 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.108982 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109003 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " 
pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109021 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-222xz\" (UniqueName: \"kubernetes.io/projected/cff4bfea-ae50-4298-bfc4-f21a10231ef2-kube-api-access-222xz\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109039 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-router-certs\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109057 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-session\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109071 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109090 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109110 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-error\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109126 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-service-ca\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.109775 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cff4bfea-ae50-4298-bfc4-f21a10231ef2-audit-dir\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " 
pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.110066 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-service-ca\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.110128 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.110405 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-audit-policies\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.110599 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.116610 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.116665 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.116864 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-login\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.116928 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-template-error\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " 
pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.117069 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.117408 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-session\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.119107 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-router-certs\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.120695 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cff4bfea-ae50-4298-bfc4-f21a10231ef2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.140255 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-222xz\" (UniqueName: \"kubernetes.io/projected/cff4bfea-ae50-4298-bfc4-f21a10231ef2-kube-api-access-222xz\") pod \"oauth-openshift-5477954dc8-cvshr\" (UID: \"cff4bfea-ae50-4298-bfc4-f21a10231ef2\") " pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.159267 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.219586 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.346230 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.350491 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.390080 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.402670 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.448810 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.526045 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.572403 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.578042 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.688517 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.820374 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.825224 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.845023 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.847071 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 23:03:57 crc kubenswrapper[5045]: I1125 23:03:57.908185 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.053504 5045 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.097005 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.167842 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.360822 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.403336 5045 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f848dbf6-817b-44d7-b410-7ac266166501" path="/var/lib/kubelet/pods/f848dbf6-817b-44d7-b410-7ac266166501/volumes" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.406073 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.427272 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.432898 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.507674 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.511259 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.563782 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.617986 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.648411 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.705770 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.775309 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.825235 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.856908 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.901638 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 23:03:58 crc kubenswrapper[5045]: I1125 23:03:58.991284 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.078530 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.118637 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.133428 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.172127 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 23:03:59 crc 
kubenswrapper[5045]: I1125 23:03:59.268884 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5477954dc8-cvshr"] Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.456078 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.484060 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.485582 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.507650 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.545225 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5477954dc8-cvshr"] Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.773204 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 23:03:59 crc kubenswrapper[5045]: I1125 23:03:59.998770 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.078192 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.172415 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.314156 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.328400 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.349464 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.448383 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" event={"ID":"cff4bfea-ae50-4298-bfc4-f21a10231ef2","Type":"ContainerStarted","Data":"c83976e2a60d26f6f919a709fdfd00460897062ca1bbced70b88ebcd4eddce87"} Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.448429 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" event={"ID":"cff4bfea-ae50-4298-bfc4-f21a10231ef2","Type":"ContainerStarted","Data":"9d0939731fa700a97ecdbce19dd8b57eefe605cdfe6985ff403f07f96c184d11"} Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.450032 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.457246 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.483667 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-authentication/oauth-openshift-5477954dc8-cvshr" podStartSLOduration=55.483643328 podStartE2EDuration="55.483643328s" podCreationTimestamp="2025-11-25 23:03:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:00.479307537 +0000 UTC m=+296.836966649" watchObservedRunningTime="2025-11-25 23:04:00.483643328 +0000 UTC m=+296.841302440" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.628629 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.758070 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 23:04:00 crc kubenswrapper[5045]: I1125 23:04:00.789588 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.195051 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.231947 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.261871 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.272454 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.389287 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.475335 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.662374 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.819849 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.857342 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.888831 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 23:04:01 crc kubenswrapper[5045]: I1125 23:04:01.908443 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 23:04:02 crc kubenswrapper[5045]: I1125 23:04:02.033485 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 23:04:02 crc kubenswrapper[5045]: I1125 23:04:02.055292 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 23:04:02 crc kubenswrapper[5045]: I1125 23:04:02.080333 5045 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 23:04:02 crc kubenswrapper[5045]: I1125 23:04:02.379691 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 23:04:02 crc kubenswrapper[5045]: I1125 23:04:02.602278 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 23:04:03 crc kubenswrapper[5045]: I1125 23:04:03.141961 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 23:04:03 crc kubenswrapper[5045]: I1125 23:04:03.580576 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 23:04:08 crc kubenswrapper[5045]: I1125 23:04:08.072634 5045 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 23:04:08 crc kubenswrapper[5045]: I1125 23:04:08.073524 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://0388346ba3a0886e8ce2c3a1d85576186ccc8b67a614b2c5c05f7409b9833234" gracePeriod=5 Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.535275 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.535688 5045 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="0388346ba3a0886e8ce2c3a1d85576186ccc8b67a614b2c5c05f7409b9833234" exitCode=137 Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.636724 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.636797 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744145 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744260 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744290 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744307 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744342 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744389 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744417 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744551 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744543 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744919 5045 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744942 5045 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.744994 5045 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.745013 5045 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.756416 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:04:13 crc kubenswrapper[5045]: I1125 23:04:13.846408 5045 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:14 crc kubenswrapper[5045]: I1125 23:04:14.406151 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 23:04:14 crc kubenswrapper[5045]: I1125 23:04:14.542313 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 23:04:14 crc kubenswrapper[5045]: I1125 23:04:14.542407 5045 scope.go:117] "RemoveContainer" containerID="0388346ba3a0886e8ce2c3a1d85576186ccc8b67a614b2c5c05f7409b9833234" Nov 25 23:04:14 crc kubenswrapper[5045]: I1125 23:04:14.542552 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 23:04:20 crc kubenswrapper[5045]: I1125 23:04:20.440645 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-mnm5x"] Nov 25 23:04:20 crc kubenswrapper[5045]: I1125 23:04:20.442074 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" podUID="c9443fc5-a284-4838-a107-6146af9d6bba" containerName="controller-manager" containerID="cri-o://5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2" gracePeriod=30 Nov 25 23:04:20 crc kubenswrapper[5045]: I1125 23:04:20.531011 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9"] Nov 25 23:04:20 crc kubenswrapper[5045]: I1125 23:04:20.531521 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" podUID="4e266b78-e9fa-40bf-844b-7d5e273e988b" containerName="route-controller-manager" containerID="cri-o://b60c7ea29e95069c84cf32da2b7cf1c9eed8397d9b2c08a4290f3bb798575976" gracePeriod=30 Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.508070 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.533306 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6c448b59b9-2plpd"] Nov 25 23:04:21 crc kubenswrapper[5045]: E1125 23:04:21.533539 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.533554 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 23:04:21 crc kubenswrapper[5045]: E1125 23:04:21.533569 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9443fc5-a284-4838-a107-6146af9d6bba" containerName="controller-manager" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.533578 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9443fc5-a284-4838-a107-6146af9d6bba" containerName="controller-manager" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.533686 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.533705 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9443fc5-a284-4838-a107-6146af9d6bba" containerName="controller-manager" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.534145 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.545174 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c448b59b9-2plpd"] Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.593447 5045 generic.go:334] "Generic (PLEG): container finished" podID="4e266b78-e9fa-40bf-844b-7d5e273e988b" containerID="b60c7ea29e95069c84cf32da2b7cf1c9eed8397d9b2c08a4290f3bb798575976" exitCode=0 Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.593605 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" event={"ID":"4e266b78-e9fa-40bf-844b-7d5e273e988b","Type":"ContainerDied","Data":"b60c7ea29e95069c84cf32da2b7cf1c9eed8397d9b2c08a4290f3bb798575976"} Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.593864 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" event={"ID":"4e266b78-e9fa-40bf-844b-7d5e273e988b","Type":"ContainerDied","Data":"b6ab6a6e5d8a08814ed01b34ba11dd460128d0356877b0c51fd072eafd6ffa46"} Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.593908 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6ab6a6e5d8a08814ed01b34ba11dd460128d0356877b0c51fd072eafd6ffa46" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.595036 5045 generic.go:334] "Generic (PLEG): container finished" podID="c9443fc5-a284-4838-a107-6146af9d6bba" containerID="5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2" exitCode=0 Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.595073 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" event={"ID":"c9443fc5-a284-4838-a107-6146af9d6bba","Type":"ContainerDied","Data":"5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2"} Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.595133 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" event={"ID":"c9443fc5-a284-4838-a107-6146af9d6bba","Type":"ContainerDied","Data":"36bb8fe6c05d7096723062383a9b474849ab1b06e128aa9d8621a00cb434c48f"} Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.595157 5045 scope.go:117] "RemoveContainer" containerID="5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.595099 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-mnm5x" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.600212 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.617676 5045 scope.go:117] "RemoveContainer" containerID="5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2" Nov 25 23:04:21 crc kubenswrapper[5045]: E1125 23:04:21.618212 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2\": container with ID starting with 5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2 not found: ID does not exist" containerID="5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.618310 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2"} err="failed to get container status \"5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2\": rpc error: code = NotFound desc = could not find container \"5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2\": container with ID starting with 5ff039174ba6951b34cf2bf8f0c30713d8ba0f816258ffb7914b6ee664b676e2 not found: ID does not exist" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.656962 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config\") pod \"c9443fc5-a284-4838-a107-6146af9d6bba\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.657741 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config" (OuterVolumeSpecName: "config") pod "c9443fc5-a284-4838-a107-6146af9d6bba" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.657840 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert\") pod \"c9443fc5-a284-4838-a107-6146af9d6bba\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.657926 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-proxy-ca-bundles\") pod \"c9443fc5-a284-4838-a107-6146af9d6bba\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.657982 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca\") pod \"c9443fc5-a284-4838-a107-6146af9d6bba\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658012 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6x5j\" (UniqueName: \"kubernetes.io/projected/c9443fc5-a284-4838-a107-6146af9d6bba-kube-api-access-v6x5j\") pod \"c9443fc5-a284-4838-a107-6146af9d6bba\" (UID: \"c9443fc5-a284-4838-a107-6146af9d6bba\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658150 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q79d8\" (UniqueName: \"kubernetes.io/projected/c15a1cc4-68ab-4104-a7a5-c08399fff195-kube-api-access-q79d8\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658232 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c15a1cc4-68ab-4104-a7a5-c08399fff195-serving-cert\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658263 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-proxy-ca-bundles\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658302 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-client-ca\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658335 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca" (OuterVolumeSpecName: "client-ca") pod "c9443fc5-a284-4838-a107-6146af9d6bba" (UID: 
"c9443fc5-a284-4838-a107-6146af9d6bba"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658445 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-config\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658754 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658805 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.658833 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c9443fc5-a284-4838-a107-6146af9d6bba" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.662571 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c9443fc5-a284-4838-a107-6146af9d6bba" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.663110 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9443fc5-a284-4838-a107-6146af9d6bba-kube-api-access-v6x5j" (OuterVolumeSpecName: "kube-api-access-v6x5j") pod "c9443fc5-a284-4838-a107-6146af9d6bba" (UID: "c9443fc5-a284-4838-a107-6146af9d6bba"). InnerVolumeSpecName "kube-api-access-v6x5j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.760287 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-client-ca\") pod \"4e266b78-e9fa-40bf-844b-7d5e273e988b\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.760369 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-config\") pod \"4e266b78-e9fa-40bf-844b-7d5e273e988b\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.760423 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e266b78-e9fa-40bf-844b-7d5e273e988b-serving-cert\") pod \"4e266b78-e9fa-40bf-844b-7d5e273e988b\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.760463 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9w77\" (UniqueName: \"kubernetes.io/projected/4e266b78-e9fa-40bf-844b-7d5e273e988b-kube-api-access-w9w77\") pod \"4e266b78-e9fa-40bf-844b-7d5e273e988b\" (UID: \"4e266b78-e9fa-40bf-844b-7d5e273e988b\") " Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.760903 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c15a1cc4-68ab-4104-a7a5-c08399fff195-serving-cert\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.760941 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-proxy-ca-bundles\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.760984 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-client-ca\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.761076 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-config\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.761446 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-client-ca" (OuterVolumeSpecName: "client-ca") pod "4e266b78-e9fa-40bf-844b-7d5e273e988b" (UID: "4e266b78-e9fa-40bf-844b-7d5e273e988b"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.762394 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-config" (OuterVolumeSpecName: "config") pod "4e266b78-e9fa-40bf-844b-7d5e273e988b" (UID: "4e266b78-e9fa-40bf-844b-7d5e273e988b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.763183 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-client-ca\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.763385 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-proxy-ca-bundles\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.763522 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q79d8\" (UniqueName: \"kubernetes.io/projected/c15a1cc4-68ab-4104-a7a5-c08399fff195-kube-api-access-q79d8\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.763622 5045 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c9443fc5-a284-4838-a107-6146af9d6bba-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.763644 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.763662 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e266b78-e9fa-40bf-844b-7d5e273e988b-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.763681 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6x5j\" (UniqueName: \"kubernetes.io/projected/c9443fc5-a284-4838-a107-6146af9d6bba-kube-api-access-v6x5j\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.763704 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9443fc5-a284-4838-a107-6146af9d6bba-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.765959 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e266b78-e9fa-40bf-844b-7d5e273e988b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4e266b78-e9fa-40bf-844b-7d5e273e988b" (UID: "4e266b78-e9fa-40bf-844b-7d5e273e988b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.766087 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-config\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.766156 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e266b78-e9fa-40bf-844b-7d5e273e988b-kube-api-access-w9w77" (OuterVolumeSpecName: "kube-api-access-w9w77") pod "4e266b78-e9fa-40bf-844b-7d5e273e988b" (UID: "4e266b78-e9fa-40bf-844b-7d5e273e988b"). InnerVolumeSpecName "kube-api-access-w9w77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.768577 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c15a1cc4-68ab-4104-a7a5-c08399fff195-serving-cert\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.795780 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q79d8\" (UniqueName: \"kubernetes.io/projected/c15a1cc4-68ab-4104-a7a5-c08399fff195-kube-api-access-q79d8\") pod \"controller-manager-6c448b59b9-2plpd\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.858682 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.865286 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e266b78-e9fa-40bf-844b-7d5e273e988b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.865331 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9w77\" (UniqueName: \"kubernetes.io/projected/4e266b78-e9fa-40bf-844b-7d5e273e988b-kube-api-access-w9w77\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.954865 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-mnm5x"] Nov 25 23:04:21 crc kubenswrapper[5045]: I1125 23:04:21.964098 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-mnm5x"] Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.161117 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c448b59b9-2plpd"] Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.410981 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9443fc5-a284-4838-a107-6146af9d6bba" path="/var/lib/kubelet/pods/c9443fc5-a284-4838-a107-6146af9d6bba/volumes" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.557576 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c448b59b9-2plpd"] Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.600841 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6"] Nov 25 23:04:22 crc kubenswrapper[5045]: E1125 23:04:22.601271 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e266b78-e9fa-40bf-844b-7d5e273e988b" containerName="route-controller-manager" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.601308 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e266b78-e9fa-40bf-844b-7d5e273e988b" containerName="route-controller-manager" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.601789 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e266b78-e9fa-40bf-844b-7d5e273e988b" containerName="route-controller-manager" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.602965 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.603903 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" event={"ID":"c15a1cc4-68ab-4104-a7a5-c08399fff195","Type":"ContainerStarted","Data":"37016def4b83957dd639eab66cc69c2a0ab55b0a157fe3306d51e8f62f06f631"} Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.603958 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" event={"ID":"c15a1cc4-68ab-4104-a7a5-c08399fff195","Type":"ContainerStarted","Data":"27f3b08d34b74e100d3378e1cc6b557e44463d53afde7e4119957a4cbb58c7e0"} Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.603997 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.639065 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6"] Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.665231 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9"] Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.668388 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-h6sq9"] Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.678424 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-config\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.678491 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-client-ca\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.678519 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-serving-cert\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.678556 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8snjw\" (UniqueName: \"kubernetes.io/projected/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-kube-api-access-8snjw\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.780140 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-client-ca\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.780187 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-serving-cert\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.780234 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8snjw\" 
(UniqueName: \"kubernetes.io/projected/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-kube-api-access-8snjw\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.780270 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-config\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.781298 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-client-ca\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.781378 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-config\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.785161 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-serving-cert\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.796070 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8snjw\" (UniqueName: \"kubernetes.io/projected/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-kube-api-access-8snjw\") pod \"route-controller-manager-b7946cdf7-wtzd6\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:22 crc kubenswrapper[5045]: I1125 23:04:22.927431 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:23 crc kubenswrapper[5045]: I1125 23:04:23.166857 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6"] Nov 25 23:04:23 crc kubenswrapper[5045]: I1125 23:04:23.610594 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" event={"ID":"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422","Type":"ContainerStarted","Data":"ffd1736a521390c87ebdcde427886bfab022de7fb4bf6b5b2aafbee9b0be11ec"} Nov 25 23:04:23 crc kubenswrapper[5045]: I1125 23:04:23.610671 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" podUID="c15a1cc4-68ab-4104-a7a5-c08399fff195" containerName="controller-manager" containerID="cri-o://37016def4b83957dd639eab66cc69c2a0ab55b0a157fe3306d51e8f62f06f631" gracePeriod=30 Nov 25 23:04:23 crc kubenswrapper[5045]: I1125 23:04:23.610906 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:23 crc kubenswrapper[5045]: I1125 23:04:23.615479 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:23 crc kubenswrapper[5045]: I1125 23:04:23.631486 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" podStartSLOduration=3.631443554 podStartE2EDuration="3.631443554s" podCreationTimestamp="2025-11-25 23:04:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:23.627852036 +0000 UTC m=+319.985511158" watchObservedRunningTime="2025-11-25 23:04:23.631443554 +0000 UTC m=+319.989102666" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.412461 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e266b78-e9fa-40bf-844b-7d5e273e988b" path="/var/lib/kubelet/pods/4e266b78-e9fa-40bf-844b-7d5e273e988b/volumes" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.617364 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" event={"ID":"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422","Type":"ContainerStarted","Data":"fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163"} Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.617841 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.619952 5045 generic.go:334] "Generic (PLEG): container finished" podID="c15a1cc4-68ab-4104-a7a5-c08399fff195" containerID="37016def4b83957dd639eab66cc69c2a0ab55b0a157fe3306d51e8f62f06f631" exitCode=0 Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.620191 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" event={"ID":"c15a1cc4-68ab-4104-a7a5-c08399fff195","Type":"ContainerDied","Data":"37016def4b83957dd639eab66cc69c2a0ab55b0a157fe3306d51e8f62f06f631"} Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.621972 5045 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.648671 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" podStartSLOduration=2.648651207 podStartE2EDuration="2.648651207s" podCreationTimestamp="2025-11-25 23:04:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:24.632302582 +0000 UTC m=+320.989961694" watchObservedRunningTime="2025-11-25 23:04:24.648651207 +0000 UTC m=+321.006310319" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.654480 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.803430 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c15a1cc4-68ab-4104-a7a5-c08399fff195-serving-cert\") pod \"c15a1cc4-68ab-4104-a7a5-c08399fff195\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.804625 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-client-ca\") pod \"c15a1cc4-68ab-4104-a7a5-c08399fff195\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.804746 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-config\") pod \"c15a1cc4-68ab-4104-a7a5-c08399fff195\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.804780 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-proxy-ca-bundles\") pod \"c15a1cc4-68ab-4104-a7a5-c08399fff195\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.804812 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q79d8\" (UniqueName: \"kubernetes.io/projected/c15a1cc4-68ab-4104-a7a5-c08399fff195-kube-api-access-q79d8\") pod \"c15a1cc4-68ab-4104-a7a5-c08399fff195\" (UID: \"c15a1cc4-68ab-4104-a7a5-c08399fff195\") " Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.805830 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c15a1cc4-68ab-4104-a7a5-c08399fff195" (UID: "c15a1cc4-68ab-4104-a7a5-c08399fff195"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.805839 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-client-ca" (OuterVolumeSpecName: "client-ca") pod "c15a1cc4-68ab-4104-a7a5-c08399fff195" (UID: "c15a1cc4-68ab-4104-a7a5-c08399fff195"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.806023 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-config" (OuterVolumeSpecName: "config") pod "c15a1cc4-68ab-4104-a7a5-c08399fff195" (UID: "c15a1cc4-68ab-4104-a7a5-c08399fff195"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.808991 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c15a1cc4-68ab-4104-a7a5-c08399fff195-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c15a1cc4-68ab-4104-a7a5-c08399fff195" (UID: "c15a1cc4-68ab-4104-a7a5-c08399fff195"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.809114 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c15a1cc4-68ab-4104-a7a5-c08399fff195-kube-api-access-q79d8" (OuterVolumeSpecName: "kube-api-access-q79d8") pod "c15a1cc4-68ab-4104-a7a5-c08399fff195" (UID: "c15a1cc4-68ab-4104-a7a5-c08399fff195"). InnerVolumeSpecName "kube-api-access-q79d8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.906565 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.906618 5045 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.906639 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q79d8\" (UniqueName: \"kubernetes.io/projected/c15a1cc4-68ab-4104-a7a5-c08399fff195-kube-api-access-q79d8\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.906658 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c15a1cc4-68ab-4104-a7a5-c08399fff195-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:24 crc kubenswrapper[5045]: I1125 23:04:24.906676 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c15a1cc4-68ab-4104-a7a5-c08399fff195-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:25 crc kubenswrapper[5045]: I1125 23:04:25.629993 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" event={"ID":"c15a1cc4-68ab-4104-a7a5-c08399fff195","Type":"ContainerDied","Data":"27f3b08d34b74e100d3378e1cc6b557e44463d53afde7e4119957a4cbb58c7e0"} Nov 25 23:04:25 crc kubenswrapper[5045]: I1125 23:04:25.630016 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c448b59b9-2plpd" Nov 25 23:04:25 crc kubenswrapper[5045]: I1125 23:04:25.630077 5045 scope.go:117] "RemoveContainer" containerID="37016def4b83957dd639eab66cc69c2a0ab55b0a157fe3306d51e8f62f06f631" Nov 25 23:04:25 crc kubenswrapper[5045]: I1125 23:04:25.677835 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c448b59b9-2plpd"] Nov 25 23:04:25 crc kubenswrapper[5045]: I1125 23:04:25.684601 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6c448b59b9-2plpd"] Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.410835 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c15a1cc4-68ab-4104-a7a5-c08399fff195" path="/var/lib/kubelet/pods/c15a1cc4-68ab-4104-a7a5-c08399fff195/volumes" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.568991 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-ngpt2"] Nov 25 23:04:26 crc kubenswrapper[5045]: E1125 23:04:26.569300 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c15a1cc4-68ab-4104-a7a5-c08399fff195" containerName="controller-manager" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.569327 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c15a1cc4-68ab-4104-a7a5-c08399fff195" containerName="controller-manager" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.569485 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c15a1cc4-68ab-4104-a7a5-c08399fff195" containerName="controller-manager" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.570169 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.577360 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-ngpt2"] Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.577429 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.578146 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.578222 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.578318 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.578417 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.578755 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.582042 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.630772 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sp8gw\" (UniqueName: \"kubernetes.io/projected/6a331361-3e07-4663-8c41-7c5557c66497-kube-api-access-sp8gw\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.631024 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-client-ca\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.631137 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a331361-3e07-4663-8c41-7c5557c66497-serving-cert\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.631238 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-proxy-ca-bundles\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.631337 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-config\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.739649 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sp8gw\" (UniqueName: \"kubernetes.io/projected/6a331361-3e07-4663-8c41-7c5557c66497-kube-api-access-sp8gw\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.739755 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-client-ca\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.739800 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a331361-3e07-4663-8c41-7c5557c66497-serving-cert\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.739831 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-proxy-ca-bundles\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.739878 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-config\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.740587 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-client-ca\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.741326 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-config\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.741662 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-proxy-ca-bundles\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" 
Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.746459 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a331361-3e07-4663-8c41-7c5557c66497-serving-cert\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.756130 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sp8gw\" (UniqueName: \"kubernetes.io/projected/6a331361-3e07-4663-8c41-7c5557c66497-kube-api-access-sp8gw\") pod \"controller-manager-5b768f5b95-ngpt2\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:26 crc kubenswrapper[5045]: I1125 23:04:26.892475 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:27 crc kubenswrapper[5045]: I1125 23:04:27.174616 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-ngpt2"] Nov 25 23:04:27 crc kubenswrapper[5045]: I1125 23:04:27.644044 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" event={"ID":"6a331361-3e07-4663-8c41-7c5557c66497","Type":"ContainerStarted","Data":"1125bc7cfa4acd8e2437ebb916af32e3323bad9a9ff14928479204fead089cbe"} Nov 25 23:04:28 crc kubenswrapper[5045]: I1125 23:04:28.651093 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" event={"ID":"6a331361-3e07-4663-8c41-7c5557c66497","Type":"ContainerStarted","Data":"fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b"} Nov 25 23:04:28 crc kubenswrapper[5045]: I1125 23:04:28.651445 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:28 crc kubenswrapper[5045]: I1125 23:04:28.661569 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:28 crc kubenswrapper[5045]: I1125 23:04:28.675552 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" podStartSLOduration=6.675535296 podStartE2EDuration="6.675535296s" podCreationTimestamp="2025-11-25 23:04:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:28.673524985 +0000 UTC m=+325.031184097" watchObservedRunningTime="2025-11-25 23:04:28.675535296 +0000 UTC m=+325.033194408" Nov 25 23:04:30 crc kubenswrapper[5045]: I1125 23:04:30.540876 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:04:30 crc kubenswrapper[5045]: I1125 23:04:30.541260 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:04:32 crc kubenswrapper[5045]: I1125 23:04:32.527053 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-ngpt2"] Nov 25 23:04:32 crc kubenswrapper[5045]: I1125 23:04:32.527357 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" podUID="6a331361-3e07-4663-8c41-7c5557c66497" containerName="controller-manager" containerID="cri-o://fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b" gracePeriod=30 Nov 25 23:04:32 crc kubenswrapper[5045]: I1125 23:04:32.540035 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6"] Nov 25 23:04:32 crc kubenswrapper[5045]: I1125 23:04:32.540481 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" podUID="55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" containerName="route-controller-manager" containerID="cri-o://fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163" gracePeriod=30 Nov 25 23:04:32 crc kubenswrapper[5045]: I1125 23:04:32.928291 5045 patch_prober.go:28] interesting pod/route-controller-manager-b7946cdf7-wtzd6 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": dial tcp 10.217.0.58:8443: connect: connection refused" start-of-body= Nov 25 23:04:32 crc kubenswrapper[5045]: I1125 23:04:32.928665 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" podUID="55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": dial tcp 10.217.0.58:8443: connect: connection refused" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.565637 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.596996 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb"] Nov 25 23:04:33 crc kubenswrapper[5045]: E1125 23:04:33.597297 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" containerName="route-controller-manager" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.597317 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" containerName="route-controller-manager" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.597486 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" containerName="route-controller-manager" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.598029 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.619689 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb"] Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.636681 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-config\") pod \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.636821 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-client-ca\") pod \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.636888 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-serving-cert\") pod \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.636977 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8snjw\" (UniqueName: \"kubernetes.io/projected/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-kube-api-access-8snjw\") pod \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\" (UID: \"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.637288 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-config\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.637357 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-client-ca\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.637385 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lds4f\" (UniqueName: \"kubernetes.io/projected/e7efb020-9452-4b0d-ad84-8b2c0f028f92-kube-api-access-lds4f\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.637438 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7efb020-9452-4b0d-ad84-8b2c0f028f92-serving-cert\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc 
kubenswrapper[5045]: I1125 23:04:33.637569 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-client-ca" (OuterVolumeSpecName: "client-ca") pod "55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" (UID: "55b18bd4-aab8-4e5e-b6e5-fcb01bc76422"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.637640 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-config" (OuterVolumeSpecName: "config") pod "55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" (UID: "55b18bd4-aab8-4e5e-b6e5-fcb01bc76422"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.643433 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" (UID: "55b18bd4-aab8-4e5e-b6e5-fcb01bc76422"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.645191 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-kube-api-access-8snjw" (OuterVolumeSpecName: "kube-api-access-8snjw") pod "55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" (UID: "55b18bd4-aab8-4e5e-b6e5-fcb01bc76422"). InnerVolumeSpecName "kube-api-access-8snjw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.680891 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.697121 5045 generic.go:334] "Generic (PLEG): container finished" podID="55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" containerID="fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163" exitCode=0 Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.697182 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" event={"ID":"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422","Type":"ContainerDied","Data":"fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163"} Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.697207 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" event={"ID":"55b18bd4-aab8-4e5e-b6e5-fcb01bc76422","Type":"ContainerDied","Data":"ffd1736a521390c87ebdcde427886bfab022de7fb4bf6b5b2aafbee9b0be11ec"} Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.697224 5045 scope.go:117] "RemoveContainer" containerID="fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.697322 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.702317 5045 generic.go:334] "Generic (PLEG): container finished" podID="6a331361-3e07-4663-8c41-7c5557c66497" containerID="fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b" exitCode=0 Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.702360 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" event={"ID":"6a331361-3e07-4663-8c41-7c5557c66497","Type":"ContainerDied","Data":"fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b"} Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.702385 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" event={"ID":"6a331361-3e07-4663-8c41-7c5557c66497","Type":"ContainerDied","Data":"1125bc7cfa4acd8e2437ebb916af32e3323bad9a9ff14928479204fead089cbe"} Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.702425 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b768f5b95-ngpt2" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.715911 5045 scope.go:117] "RemoveContainer" containerID="fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163" Nov 25 23:04:33 crc kubenswrapper[5045]: E1125 23:04:33.716873 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163\": container with ID starting with fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163 not found: ID does not exist" containerID="fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.716933 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163"} err="failed to get container status \"fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163\": rpc error: code = NotFound desc = could not find container \"fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163\": container with ID starting with fbefdcd9ea5f6867164c74be0fefcf46070158c7c61744350c51c610272e6163 not found: ID does not exist" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.716951 5045 scope.go:117] "RemoveContainer" containerID="fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.725762 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6"] Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.729405 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-wtzd6"] Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.733140 5045 scope.go:117] "RemoveContainer" containerID="fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b" Nov 25 23:04:33 crc kubenswrapper[5045]: E1125 23:04:33.733625 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b\": container with ID starting with 
fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b not found: ID does not exist" containerID="fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.733752 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b"} err="failed to get container status \"fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b\": rpc error: code = NotFound desc = could not find container \"fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b\": container with ID starting with fe48a0122fe48d44a641c4145e977c4858f685ecbf4c708ba031e6d724b4705b not found: ID does not exist" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.738389 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sp8gw\" (UniqueName: \"kubernetes.io/projected/6a331361-3e07-4663-8c41-7c5557c66497-kube-api-access-sp8gw\") pod \"6a331361-3e07-4663-8c41-7c5557c66497\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.738503 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a331361-3e07-4663-8c41-7c5557c66497-serving-cert\") pod \"6a331361-3e07-4663-8c41-7c5557c66497\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.738595 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-config\") pod \"6a331361-3e07-4663-8c41-7c5557c66497\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.738682 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-client-ca\") pod \"6a331361-3e07-4663-8c41-7c5557c66497\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.738799 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-proxy-ca-bundles\") pod \"6a331361-3e07-4663-8c41-7c5557c66497\" (UID: \"6a331361-3e07-4663-8c41-7c5557c66497\") " Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.739001 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-client-ca\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.739083 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lds4f\" (UniqueName: \"kubernetes.io/projected/e7efb020-9452-4b0d-ad84-8b2c0f028f92-kube-api-access-lds4f\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.739164 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7efb020-9452-4b0d-ad84-8b2c0f028f92-serving-cert\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.739284 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-config\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.740047 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-client-ca\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.740429 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6a331361-3e07-4663-8c41-7c5557c66497" (UID: "6a331361-3e07-4663-8c41-7c5557c66497"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.740555 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-client-ca" (OuterVolumeSpecName: "client-ca") pod "6a331361-3e07-4663-8c41-7c5557c66497" (UID: "6a331361-3e07-4663-8c41-7c5557c66497"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.741073 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.741181 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.741271 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.741362 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8snjw\" (UniqueName: \"kubernetes.io/projected/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422-kube-api-access-8snjw\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.741385 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-config" (OuterVolumeSpecName: "config") pod "6a331361-3e07-4663-8c41-7c5557c66497" (UID: "6a331361-3e07-4663-8c41-7c5557c66497"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.741098 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-config\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.742053 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a331361-3e07-4663-8c41-7c5557c66497-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6a331361-3e07-4663-8c41-7c5557c66497" (UID: "6a331361-3e07-4663-8c41-7c5557c66497"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.743454 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7efb020-9452-4b0d-ad84-8b2c0f028f92-serving-cert\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.745899 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a331361-3e07-4663-8c41-7c5557c66497-kube-api-access-sp8gw" (OuterVolumeSpecName: "kube-api-access-sp8gw") pod "6a331361-3e07-4663-8c41-7c5557c66497" (UID: "6a331361-3e07-4663-8c41-7c5557c66497"). InnerVolumeSpecName "kube-api-access-sp8gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.760133 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lds4f\" (UniqueName: \"kubernetes.io/projected/e7efb020-9452-4b0d-ad84-8b2c0f028f92-kube-api-access-lds4f\") pod \"route-controller-manager-6c894cb598-x4tkb\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.843018 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sp8gw\" (UniqueName: \"kubernetes.io/projected/6a331361-3e07-4663-8c41-7c5557c66497-kube-api-access-sp8gw\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.843049 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a331361-3e07-4663-8c41-7c5557c66497-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.843058 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.843068 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.843078 5045 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6a331361-3e07-4663-8c41-7c5557c66497-proxy-ca-bundles\") on node \"crc\" DevicePath 
\"\"" Nov 25 23:04:33 crc kubenswrapper[5045]: I1125 23:04:33.922548 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.035412 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-ngpt2"] Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.043992 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-ngpt2"] Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.211747 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb"] Nov 25 23:04:34 crc kubenswrapper[5045]: W1125 23:04:34.218510 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode7efb020_9452_4b0d_ad84_8b2c0f028f92.slice/crio-396324ac53a2302200030dcd91d6ee24f868885b24b92555b8d9eb87c24e8738 WatchSource:0}: Error finding container 396324ac53a2302200030dcd91d6ee24f868885b24b92555b8d9eb87c24e8738: Status 404 returned error can't find the container with id 396324ac53a2302200030dcd91d6ee24f868885b24b92555b8d9eb87c24e8738 Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.416910 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55b18bd4-aab8-4e5e-b6e5-fcb01bc76422" path="/var/lib/kubelet/pods/55b18bd4-aab8-4e5e-b6e5-fcb01bc76422/volumes" Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.418454 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a331361-3e07-4663-8c41-7c5557c66497" path="/var/lib/kubelet/pods/6a331361-3e07-4663-8c41-7c5557c66497/volumes" Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.709307 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" event={"ID":"e7efb020-9452-4b0d-ad84-8b2c0f028f92","Type":"ContainerStarted","Data":"25b82f10321dcd6f9d32791b6dd6d926f0fef5e2ee0d8e6c57dd5018977a9229"} Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.709353 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" event={"ID":"e7efb020-9452-4b0d-ad84-8b2c0f028f92","Type":"ContainerStarted","Data":"396324ac53a2302200030dcd91d6ee24f868885b24b92555b8d9eb87c24e8738"} Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.710908 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.738485 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" podStartSLOduration=2.738459358 podStartE2EDuration="2.738459358s" podCreationTimestamp="2025-11-25 23:04:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:34.726981517 +0000 UTC m=+331.084640649" watchObservedRunningTime="2025-11-25 23:04:34.738459358 +0000 UTC m=+331.096118480" Nov 25 23:04:34 crc kubenswrapper[5045]: I1125 23:04:34.941260 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.577152 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs"] Nov 25 23:04:36 crc kubenswrapper[5045]: E1125 23:04:36.577974 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a331361-3e07-4663-8c41-7c5557c66497" containerName="controller-manager" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.578006 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a331361-3e07-4663-8c41-7c5557c66497" containerName="controller-manager" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.578255 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a331361-3e07-4663-8c41-7c5557c66497" containerName="controller-manager" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.579131 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.582369 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.582794 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.584400 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.585446 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.585751 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.587694 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.591323 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs"] Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.593158 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.690360 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-config\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.690470 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-proxy-ca-bundles\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.690591 5045 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e92b4d0-05e8-4cba-b505-1f66b64d704f-serving-cert\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.690676 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nn8f\" (UniqueName: \"kubernetes.io/projected/5e92b4d0-05e8-4cba-b505-1f66b64d704f-kube-api-access-2nn8f\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.690798 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-client-ca\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.792521 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-config\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.792594 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-proxy-ca-bundles\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.792678 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e92b4d0-05e8-4cba-b505-1f66b64d704f-serving-cert\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.793645 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nn8f\" (UniqueName: \"kubernetes.io/projected/5e92b4d0-05e8-4cba-b505-1f66b64d704f-kube-api-access-2nn8f\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.793769 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-client-ca\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.795121 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-client-ca\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.795590 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-proxy-ca-bundles\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.795917 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-config\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.802909 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e92b4d0-05e8-4cba-b505-1f66b64d704f-serving-cert\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.823973 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nn8f\" (UniqueName: \"kubernetes.io/projected/5e92b4d0-05e8-4cba-b505-1f66b64d704f-kube-api-access-2nn8f\") pod \"controller-manager-6b7fdcdf76-fmrcs\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:36 crc kubenswrapper[5045]: I1125 23:04:36.906791 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:37 crc kubenswrapper[5045]: I1125 23:04:37.333982 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs"] Nov 25 23:04:37 crc kubenswrapper[5045]: W1125 23:04:37.348796 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e92b4d0_05e8_4cba_b505_1f66b64d704f.slice/crio-dcc1b41da02c8ffbdf9b2805f87bec298240f97ce7593cd54e1df46fea0359b8 WatchSource:0}: Error finding container dcc1b41da02c8ffbdf9b2805f87bec298240f97ce7593cd54e1df46fea0359b8: Status 404 returned error can't find the container with id dcc1b41da02c8ffbdf9b2805f87bec298240f97ce7593cd54e1df46fea0359b8 Nov 25 23:04:37 crc kubenswrapper[5045]: I1125 23:04:37.739414 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" event={"ID":"5e92b4d0-05e8-4cba-b505-1f66b64d704f","Type":"ContainerStarted","Data":"82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c"} Nov 25 23:04:37 crc kubenswrapper[5045]: I1125 23:04:37.739763 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" event={"ID":"5e92b4d0-05e8-4cba-b505-1f66b64d704f","Type":"ContainerStarted","Data":"dcc1b41da02c8ffbdf9b2805f87bec298240f97ce7593cd54e1df46fea0359b8"} Nov 25 23:04:37 crc kubenswrapper[5045]: I1125 23:04:37.739797 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:37 crc kubenswrapper[5045]: I1125 23:04:37.749625 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:37 crc kubenswrapper[5045]: I1125 23:04:37.771545 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" podStartSLOduration=5.771512781 podStartE2EDuration="5.771512781s" podCreationTimestamp="2025-11-25 23:04:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:37.764806521 +0000 UTC m=+334.122465673" watchObservedRunningTime="2025-11-25 23:04:37.771512781 +0000 UTC m=+334.129171933" Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.376500 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs"] Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.395294 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb"] Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.395523 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" podUID="e7efb020-9452-4b0d-ad84-8b2c0f028f92" containerName="route-controller-manager" containerID="cri-o://25b82f10321dcd6f9d32791b6dd6d926f0fef5e2ee0d8e6c57dd5018977a9229" gracePeriod=30 Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.762761 5045 generic.go:334] "Generic (PLEG): container finished" podID="e7efb020-9452-4b0d-ad84-8b2c0f028f92" 
containerID="25b82f10321dcd6f9d32791b6dd6d926f0fef5e2ee0d8e6c57dd5018977a9229" exitCode=0 Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.762856 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" event={"ID":"e7efb020-9452-4b0d-ad84-8b2c0f028f92","Type":"ContainerDied","Data":"25b82f10321dcd6f9d32791b6dd6d926f0fef5e2ee0d8e6c57dd5018977a9229"} Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.763250 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" podUID="5e92b4d0-05e8-4cba-b505-1f66b64d704f" containerName="controller-manager" containerID="cri-o://82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c" gracePeriod=30 Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.908844 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.955763 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lds4f\" (UniqueName: \"kubernetes.io/projected/e7efb020-9452-4b0d-ad84-8b2c0f028f92-kube-api-access-lds4f\") pod \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.955837 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7efb020-9452-4b0d-ad84-8b2c0f028f92-serving-cert\") pod \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.955923 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-config\") pod \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.955996 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-client-ca\") pod \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\" (UID: \"e7efb020-9452-4b0d-ad84-8b2c0f028f92\") " Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.959187 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-client-ca" (OuterVolumeSpecName: "client-ca") pod "e7efb020-9452-4b0d-ad84-8b2c0f028f92" (UID: "e7efb020-9452-4b0d-ad84-8b2c0f028f92"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.966089 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-config" (OuterVolumeSpecName: "config") pod "e7efb020-9452-4b0d-ad84-8b2c0f028f92" (UID: "e7efb020-9452-4b0d-ad84-8b2c0f028f92"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.970927 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7efb020-9452-4b0d-ad84-8b2c0f028f92-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7efb020-9452-4b0d-ad84-8b2c0f028f92" (UID: "e7efb020-9452-4b0d-ad84-8b2c0f028f92"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:04:40 crc kubenswrapper[5045]: I1125 23:04:40.970972 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7efb020-9452-4b0d-ad84-8b2c0f028f92-kube-api-access-lds4f" (OuterVolumeSpecName: "kube-api-access-lds4f") pod "e7efb020-9452-4b0d-ad84-8b2c0f028f92" (UID: "e7efb020-9452-4b0d-ad84-8b2c0f028f92"). InnerVolumeSpecName "kube-api-access-lds4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.057420 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.057502 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7efb020-9452-4b0d-ad84-8b2c0f028f92-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.057524 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lds4f\" (UniqueName: \"kubernetes.io/projected/e7efb020-9452-4b0d-ad84-8b2c0f028f92-kube-api-access-lds4f\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.057564 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7efb020-9452-4b0d-ad84-8b2c0f028f92-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.195686 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.259027 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e92b4d0-05e8-4cba-b505-1f66b64d704f-serving-cert\") pod \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.259160 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-proxy-ca-bundles\") pod \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.259234 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-client-ca\") pod \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.259264 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nn8f\" (UniqueName: \"kubernetes.io/projected/5e92b4d0-05e8-4cba-b505-1f66b64d704f-kube-api-access-2nn8f\") pod \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.259315 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-config\") pod \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\" (UID: \"5e92b4d0-05e8-4cba-b505-1f66b64d704f\") " Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.259897 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-client-ca" (OuterVolumeSpecName: "client-ca") pod "5e92b4d0-05e8-4cba-b505-1f66b64d704f" (UID: "5e92b4d0-05e8-4cba-b505-1f66b64d704f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.259944 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5e92b4d0-05e8-4cba-b505-1f66b64d704f" (UID: "5e92b4d0-05e8-4cba-b505-1f66b64d704f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.260175 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-config" (OuterVolumeSpecName: "config") pod "5e92b4d0-05e8-4cba-b505-1f66b64d704f" (UID: "5e92b4d0-05e8-4cba-b505-1f66b64d704f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.262347 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e92b4d0-05e8-4cba-b505-1f66b64d704f-kube-api-access-2nn8f" (OuterVolumeSpecName: "kube-api-access-2nn8f") pod "5e92b4d0-05e8-4cba-b505-1f66b64d704f" (UID: "5e92b4d0-05e8-4cba-b505-1f66b64d704f"). InnerVolumeSpecName "kube-api-access-2nn8f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.262961 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e92b4d0-05e8-4cba-b505-1f66b64d704f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5e92b4d0-05e8-4cba-b505-1f66b64d704f" (UID: "5e92b4d0-05e8-4cba-b505-1f66b64d704f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.360452 5045 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.360496 5045 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.360511 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nn8f\" (UniqueName: \"kubernetes.io/projected/5e92b4d0-05e8-4cba-b505-1f66b64d704f-kube-api-access-2nn8f\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.360526 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e92b4d0-05e8-4cba-b505-1f66b64d704f-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.360539 5045 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e92b4d0-05e8-4cba-b505-1f66b64d704f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.575421 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7"] Nov 25 23:04:41 crc kubenswrapper[5045]: E1125 23:04:41.575995 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7efb020-9452-4b0d-ad84-8b2c0f028f92" containerName="route-controller-manager" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.576028 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7efb020-9452-4b0d-ad84-8b2c0f028f92" containerName="route-controller-manager" Nov 25 23:04:41 crc kubenswrapper[5045]: E1125 23:04:41.576073 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e92b4d0-05e8-4cba-b505-1f66b64d704f" containerName="controller-manager" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.576092 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e92b4d0-05e8-4cba-b505-1f66b64d704f" containerName="controller-manager" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.576304 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e92b4d0-05e8-4cba-b505-1f66b64d704f" containerName="controller-manager" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.576327 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7efb020-9452-4b0d-ad84-8b2c0f028f92" containerName="route-controller-manager" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.577113 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.578661 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-tbqz8"] Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.579250 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.586471 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7"] Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.595958 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-tbqz8"] Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664242 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67ea2823-8475-4a4d-89e0-bcec58099550-serving-cert\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664280 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8kg2\" (UniqueName: \"kubernetes.io/projected/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-kube-api-access-f8kg2\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664305 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zws4b\" (UniqueName: \"kubernetes.io/projected/67ea2823-8475-4a4d-89e0-bcec58099550-kube-api-access-zws4b\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664332 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-client-ca\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664347 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-serving-cert\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664393 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-config\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " 
pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664545 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-config\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664598 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-client-ca\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.664624 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-proxy-ca-bundles\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765257 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67ea2823-8475-4a4d-89e0-bcec58099550-serving-cert\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765304 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8kg2\" (UniqueName: \"kubernetes.io/projected/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-kube-api-access-f8kg2\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765339 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zws4b\" (UniqueName: \"kubernetes.io/projected/67ea2823-8475-4a4d-89e0-bcec58099550-kube-api-access-zws4b\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765378 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-client-ca\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765400 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-serving-cert\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 
23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765426 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-config\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765460 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-config\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765484 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-proxy-ca-bundles\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.765505 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-client-ca\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.767211 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-client-ca\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.767319 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-proxy-ca-bundles\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.767482 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-config\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.767857 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-client-ca\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.768379 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67ea2823-8475-4a4d-89e0-bcec58099550-config\") pod 
\"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.771515 5045 generic.go:334] "Generic (PLEG): container finished" podID="5e92b4d0-05e8-4cba-b505-1f66b64d704f" containerID="82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c" exitCode=0 Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.771818 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" event={"ID":"5e92b4d0-05e8-4cba-b505-1f66b64d704f","Type":"ContainerDied","Data":"82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c"} Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.771846 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" event={"ID":"5e92b4d0-05e8-4cba-b505-1f66b64d704f","Type":"ContainerDied","Data":"dcc1b41da02c8ffbdf9b2805f87bec298240f97ce7593cd54e1df46fea0359b8"} Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.771867 5045 scope.go:117] "RemoveContainer" containerID="82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.771994 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.772825 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-serving-cert\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.783831 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" event={"ID":"e7efb020-9452-4b0d-ad84-8b2c0f028f92","Type":"ContainerDied","Data":"396324ac53a2302200030dcd91d6ee24f868885b24b92555b8d9eb87c24e8738"} Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.783937 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.784844 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67ea2823-8475-4a4d-89e0-bcec58099550-serving-cert\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.786514 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8kg2\" (UniqueName: \"kubernetes.io/projected/0ac6bfed-4621-42af-b61f-4dbd8a4ba52a-kube-api-access-f8kg2\") pod \"route-controller-manager-b7946cdf7-hhfg7\" (UID: \"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a\") " pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.797059 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zws4b\" (UniqueName: \"kubernetes.io/projected/67ea2823-8475-4a4d-89e0-bcec58099550-kube-api-access-zws4b\") pod \"controller-manager-5b768f5b95-tbqz8\" (UID: \"67ea2823-8475-4a4d-89e0-bcec58099550\") " pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.834947 5045 scope.go:117] "RemoveContainer" containerID="82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c" Nov 25 23:04:41 crc kubenswrapper[5045]: E1125 23:04:41.838195 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c\": container with ID starting with 82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c not found: ID does not exist" containerID="82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.838238 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c"} err="failed to get container status \"82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c\": rpc error: code = NotFound desc = could not find container \"82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c\": container with ID starting with 82e0d295b331a642ab2229203c4ee921d769a5a9d1e62c8343ea2f7d4bab8c0c not found: ID does not exist" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.838265 5045 scope.go:117] "RemoveContainer" containerID="25b82f10321dcd6f9d32791b6dd6d926f0fef5e2ee0d8e6c57dd5018977a9229" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.846002 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb"] Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.856104 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c894cb598-x4tkb"] Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.869443 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs"] Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.872371 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-controller-manager/controller-manager-6b7fdcdf76-fmrcs"] Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.919251 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:41 crc kubenswrapper[5045]: I1125 23:04:41.928473 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.180039 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7"] Nov 25 23:04:42 crc kubenswrapper[5045]: W1125 23:04:42.183875 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ac6bfed_4621_42af_b61f_4dbd8a4ba52a.slice/crio-d55e122afb7a0696fbf6d5168763170b23b18782a768ccf82b437e3ddbce41fe WatchSource:0}: Error finding container d55e122afb7a0696fbf6d5168763170b23b18782a768ccf82b437e3ddbce41fe: Status 404 returned error can't find the container with id d55e122afb7a0696fbf6d5168763170b23b18782a768ccf82b437e3ddbce41fe Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.211524 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b768f5b95-tbqz8"] Nov 25 23:04:42 crc kubenswrapper[5045]: W1125 23:04:42.214271 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67ea2823_8475_4a4d_89e0_bcec58099550.slice/crio-98ca17e621c88d3461e116023b2890bdcbab2e357a3499ff6b8fcab2fb4f6684 WatchSource:0}: Error finding container 98ca17e621c88d3461e116023b2890bdcbab2e357a3499ff6b8fcab2fb4f6684: Status 404 returned error can't find the container with id 98ca17e621c88d3461e116023b2890bdcbab2e357a3499ff6b8fcab2fb4f6684 Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.401949 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e92b4d0-05e8-4cba-b505-1f66b64d704f" path="/var/lib/kubelet/pods/5e92b4d0-05e8-4cba-b505-1f66b64d704f/volumes" Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.402750 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7efb020-9452-4b0d-ad84-8b2c0f028f92" path="/var/lib/kubelet/pods/e7efb020-9452-4b0d-ad84-8b2c0f028f92/volumes" Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.791683 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" event={"ID":"67ea2823-8475-4a4d-89e0-bcec58099550","Type":"ContainerStarted","Data":"4e091a79e816687cf8491a08822c6787ffd9819dd4cf5ca58ace1ae2f984cc0b"} Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.791744 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" event={"ID":"67ea2823-8475-4a4d-89e0-bcec58099550","Type":"ContainerStarted","Data":"98ca17e621c88d3461e116023b2890bdcbab2e357a3499ff6b8fcab2fb4f6684"} Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.793041 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.795291 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" event={"ID":"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a","Type":"ContainerStarted","Data":"3de645e034cfebe8e96541fdcdcb7102a156360e2d6efabc90ebc5cc8ef3eaa3"} Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.795325 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" event={"ID":"0ac6bfed-4621-42af-b61f-4dbd8a4ba52a","Type":"ContainerStarted","Data":"d55e122afb7a0696fbf6d5168763170b23b18782a768ccf82b437e3ddbce41fe"} Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.796099 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.798462 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.800545 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.810417 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5b768f5b95-tbqz8" podStartSLOduration=2.810380639 podStartE2EDuration="2.810380639s" podCreationTimestamp="2025-11-25 23:04:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:42.806644338 +0000 UTC m=+339.164303470" watchObservedRunningTime="2025-11-25 23:04:42.810380639 +0000 UTC m=+339.168039751" Nov 25 23:04:42 crc kubenswrapper[5045]: I1125 23:04:42.850599 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-b7946cdf7-hhfg7" podStartSLOduration=2.850583535 podStartE2EDuration="2.850583535s" podCreationTimestamp="2025-11-25 23:04:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:42.823364065 +0000 UTC m=+339.181023187" watchObservedRunningTime="2025-11-25 23:04:42.850583535 +0000 UTC m=+339.208242637" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.816916 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mghmj"] Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.818423 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.835462 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mghmj"] Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.993111 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-registry-tls\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.993163 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/606738d3-b38e-4213-9a5c-5a5c9e34abfb-trusted-ca\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.993188 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/606738d3-b38e-4213-9a5c-5a5c9e34abfb-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.993217 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/606738d3-b38e-4213-9a5c-5a5c9e34abfb-registry-certificates\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.993235 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/606738d3-b38e-4213-9a5c-5a5c9e34abfb-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.993335 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-bound-sa-token\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.993377 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z59cs\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-kube-api-access-z59cs\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:50 crc kubenswrapper[5045]: I1125 23:04:50.993440 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.016795 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.094534 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/606738d3-b38e-4213-9a5c-5a5c9e34abfb-registry-certificates\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.094589 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/606738d3-b38e-4213-9a5c-5a5c9e34abfb-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.094613 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-bound-sa-token\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.094637 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z59cs\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-kube-api-access-z59cs\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.094702 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-registry-tls\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.094758 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/606738d3-b38e-4213-9a5c-5a5c9e34abfb-trusted-ca\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.094782 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/606738d3-b38e-4213-9a5c-5a5c9e34abfb-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.095979 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/606738d3-b38e-4213-9a5c-5a5c9e34abfb-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.096613 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/606738d3-b38e-4213-9a5c-5a5c9e34abfb-registry-certificates\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.097060 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/606738d3-b38e-4213-9a5c-5a5c9e34abfb-trusted-ca\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.100367 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/606738d3-b38e-4213-9a5c-5a5c9e34abfb-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.100888 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-registry-tls\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.118141 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-bound-sa-token\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.119124 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z59cs\" (UniqueName: \"kubernetes.io/projected/606738d3-b38e-4213-9a5c-5a5c9e34abfb-kube-api-access-z59cs\") pod \"image-registry-66df7c8f76-mghmj\" (UID: \"606738d3-b38e-4213-9a5c-5a5c9e34abfb\") " pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.201040 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.659595 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mghmj"] Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.903110 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" event={"ID":"606738d3-b38e-4213-9a5c-5a5c9e34abfb","Type":"ContainerStarted","Data":"6afa6694638104639844b818e36ee2a34dc82110c6033afebb589903829c53d4"} Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.903372 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" event={"ID":"606738d3-b38e-4213-9a5c-5a5c9e34abfb","Type":"ContainerStarted","Data":"919defea4a5e84ac0eb7887e45f39014620f2980f1e134e23bb74fafc72aa949"} Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.903521 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:04:51 crc kubenswrapper[5045]: I1125 23:04:51.928886 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" podStartSLOduration=1.928867231 podStartE2EDuration="1.928867231s" podCreationTimestamp="2025-11-25 23:04:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:04:51.928131809 +0000 UTC m=+348.285790941" watchObservedRunningTime="2025-11-25 23:04:51.928867231 +0000 UTC m=+348.286526343" Nov 25 23:05:00 crc kubenswrapper[5045]: I1125 23:05:00.540832 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:05:00 crc kubenswrapper[5045]: I1125 23:05:00.541369 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:05:11 crc kubenswrapper[5045]: I1125 23:05:11.214008 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-mghmj" Nov 25 23:05:11 crc kubenswrapper[5045]: I1125 23:05:11.289933 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-9cmr2"] Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.005786 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-frnbt"] Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.006664 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-frnbt" podUID="eb680718-f140-4525-950a-980e0dc1ed87" containerName="registry-server" containerID="cri-o://93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1" gracePeriod=30 Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.015269 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-wzn6m"] Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.015656 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wzn6m" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerName="registry-server" containerID="cri-o://5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27" gracePeriod=30 Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.023301 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-84zml"] Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.023508 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" podUID="a7129135-79a3-478d-9ae4-78f7fe46280f" containerName="marketplace-operator" containerID="cri-o://5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf" gracePeriod=30 Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.033958 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h2rl5"] Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.034216 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-h2rl5" podUID="571187eb-51e5-40d8-83b3-2295535de7e6" containerName="registry-server" containerID="cri-o://2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf" gracePeriod=30 Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.051603 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-f5g5t"] Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.053944 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.066390 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-f5g5t"] Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.081806 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cntjg"] Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.086026 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cntjg" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" containerName="registry-server" containerID="cri-o://edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5" gracePeriod=30 Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.208429 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/292cc94b-5ed6-4491-8168-1ac68858f418-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.208517 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/292cc94b-5ed6-4491-8168-1ac68858f418-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.208545 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxvms\" (UniqueName: \"kubernetes.io/projected/292cc94b-5ed6-4491-8168-1ac68858f418-kube-api-access-mxvms\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.309342 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/292cc94b-5ed6-4491-8168-1ac68858f418-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.309416 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/292cc94b-5ed6-4491-8168-1ac68858f418-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.309445 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxvms\" (UniqueName: \"kubernetes.io/projected/292cc94b-5ed6-4491-8168-1ac68858f418-kube-api-access-mxvms\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.319467 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/292cc94b-5ed6-4491-8168-1ac68858f418-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.321599 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/292cc94b-5ed6-4491-8168-1ac68858f418-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.335855 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxvms\" (UniqueName: \"kubernetes.io/projected/292cc94b-5ed6-4491-8168-1ac68858f418-kube-api-access-mxvms\") pod \"marketplace-operator-79b997595-f5g5t\" (UID: \"292cc94b-5ed6-4491-8168-1ac68858f418\") " pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.392042 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.511176 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.576967 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.617318 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.617465 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfv4q\" (UniqueName: \"kubernetes.io/projected/00bdfdd4-092b-4071-87c1-fb9386f7114e-kube-api-access-pfv4q\") pod \"00bdfdd4-092b-4071-87c1-fb9386f7114e\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.617552 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-utilities\") pod \"00bdfdd4-092b-4071-87c1-fb9386f7114e\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.617645 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-catalog-content\") pod \"00bdfdd4-092b-4071-87c1-fb9386f7114e\" (UID: \"00bdfdd4-092b-4071-87c1-fb9386f7114e\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.618871 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-utilities" (OuterVolumeSpecName: "utilities") pod "00bdfdd4-092b-4071-87c1-fb9386f7114e" (UID: "00bdfdd4-092b-4071-87c1-fb9386f7114e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.624238 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.628824 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.636563 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00bdfdd4-092b-4071-87c1-fb9386f7114e-kube-api-access-pfv4q" (OuterVolumeSpecName: "kube-api-access-pfv4q") pod "00bdfdd4-092b-4071-87c1-fb9386f7114e" (UID: "00bdfdd4-092b-4071-87c1-fb9386f7114e"). InnerVolumeSpecName "kube-api-access-pfv4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.685376 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00bdfdd4-092b-4071-87c1-fb9386f7114e" (UID: "00bdfdd4-092b-4071-87c1-fb9386f7114e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718583 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm7dv\" (UniqueName: \"kubernetes.io/projected/eb680718-f140-4525-950a-980e0dc1ed87-kube-api-access-xm7dv\") pod \"eb680718-f140-4525-950a-980e0dc1ed87\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718643 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-operator-metrics\") pod \"a7129135-79a3-478d-9ae4-78f7fe46280f\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718673 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xw4v6\" (UniqueName: \"kubernetes.io/projected/571187eb-51e5-40d8-83b3-2295535de7e6-kube-api-access-xw4v6\") pod \"571187eb-51e5-40d8-83b3-2295535de7e6\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718705 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62gkr\" (UniqueName: \"kubernetes.io/projected/a7129135-79a3-478d-9ae4-78f7fe46280f-kube-api-access-62gkr\") pod \"a7129135-79a3-478d-9ae4-78f7fe46280f\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718809 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-utilities\") pod \"571187eb-51e5-40d8-83b3-2295535de7e6\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718835 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-catalog-content\") pod \"eb680718-f140-4525-950a-980e0dc1ed87\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " Nov 25 
23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718887 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-catalog-content\") pod \"571187eb-51e5-40d8-83b3-2295535de7e6\" (UID: \"571187eb-51e5-40d8-83b3-2295535de7e6\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718915 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-utilities\") pod \"eb680718-f140-4525-950a-980e0dc1ed87\" (UID: \"eb680718-f140-4525-950a-980e0dc1ed87\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.718934 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-trusted-ca\") pod \"a7129135-79a3-478d-9ae4-78f7fe46280f\" (UID: \"a7129135-79a3-478d-9ae4-78f7fe46280f\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.719120 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.719134 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfv4q\" (UniqueName: \"kubernetes.io/projected/00bdfdd4-092b-4071-87c1-fb9386f7114e-kube-api-access-pfv4q\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.719145 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bdfdd4-092b-4071-87c1-fb9386f7114e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.720064 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-utilities" (OuterVolumeSpecName: "utilities") pod "571187eb-51e5-40d8-83b3-2295535de7e6" (UID: "571187eb-51e5-40d8-83b3-2295535de7e6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.720130 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "a7129135-79a3-478d-9ae4-78f7fe46280f" (UID: "a7129135-79a3-478d-9ae4-78f7fe46280f"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.720346 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-utilities" (OuterVolumeSpecName: "utilities") pod "eb680718-f140-4525-950a-980e0dc1ed87" (UID: "eb680718-f140-4525-950a-980e0dc1ed87"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.723391 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "a7129135-79a3-478d-9ae4-78f7fe46280f" (UID: "a7129135-79a3-478d-9ae4-78f7fe46280f"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.723517 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7129135-79a3-478d-9ae4-78f7fe46280f-kube-api-access-62gkr" (OuterVolumeSpecName: "kube-api-access-62gkr") pod "a7129135-79a3-478d-9ae4-78f7fe46280f" (UID: "a7129135-79a3-478d-9ae4-78f7fe46280f"). InnerVolumeSpecName "kube-api-access-62gkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.728301 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb680718-f140-4525-950a-980e0dc1ed87-kube-api-access-xm7dv" (OuterVolumeSpecName: "kube-api-access-xm7dv") pod "eb680718-f140-4525-950a-980e0dc1ed87" (UID: "eb680718-f140-4525-950a-980e0dc1ed87"). InnerVolumeSpecName "kube-api-access-xm7dv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.729325 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/571187eb-51e5-40d8-83b3-2295535de7e6-kube-api-access-xw4v6" (OuterVolumeSpecName: "kube-api-access-xw4v6") pod "571187eb-51e5-40d8-83b3-2295535de7e6" (UID: "571187eb-51e5-40d8-83b3-2295535de7e6"). InnerVolumeSpecName "kube-api-access-xw4v6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.736991 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "571187eb-51e5-40d8-83b3-2295535de7e6" (UID: "571187eb-51e5-40d8-83b3-2295535de7e6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.765820 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb680718-f140-4525-950a-980e0dc1ed87" (UID: "eb680718-f140-4525-950a-980e0dc1ed87"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820279 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmmlb\" (UniqueName: \"kubernetes.io/projected/cc573acb-eee1-4849-967f-fd1b253b640f-kube-api-access-gmmlb\") pod \"cc573acb-eee1-4849-967f-fd1b253b640f\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820351 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-catalog-content\") pod \"cc573acb-eee1-4849-967f-fd1b253b640f\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820418 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-utilities\") pod \"cc573acb-eee1-4849-967f-fd1b253b640f\" (UID: \"cc573acb-eee1-4849-967f-fd1b253b640f\") " Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820700 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820737 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820750 5045 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820762 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xm7dv\" (UniqueName: \"kubernetes.io/projected/eb680718-f140-4525-950a-980e0dc1ed87-kube-api-access-xm7dv\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820776 5045 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a7129135-79a3-478d-9ae4-78f7fe46280f-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820788 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xw4v6\" (UniqueName: \"kubernetes.io/projected/571187eb-51e5-40d8-83b3-2295535de7e6-kube-api-access-xw4v6\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820798 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62gkr\" (UniqueName: \"kubernetes.io/projected/a7129135-79a3-478d-9ae4-78f7fe46280f-kube-api-access-62gkr\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820808 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571187eb-51e5-40d8-83b3-2295535de7e6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.820819 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/eb680718-f140-4525-950a-980e0dc1ed87-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.821649 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-utilities" (OuterVolumeSpecName: "utilities") pod "cc573acb-eee1-4849-967f-fd1b253b640f" (UID: "cc573acb-eee1-4849-967f-fd1b253b640f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.822743 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc573acb-eee1-4849-967f-fd1b253b640f-kube-api-access-gmmlb" (OuterVolumeSpecName: "kube-api-access-gmmlb") pod "cc573acb-eee1-4849-967f-fd1b253b640f" (UID: "cc573acb-eee1-4849-967f-fd1b253b640f"). InnerVolumeSpecName "kube-api-access-gmmlb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.898874 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-f5g5t"] Nov 25 23:05:18 crc kubenswrapper[5045]: W1125 23:05:18.909102 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod292cc94b_5ed6_4491_8168_1ac68858f418.slice/crio-fa318fbbdb94799592d0cbee3b0b7e9f26d64b8c5a45215890dfa0b3a2d90a82 WatchSource:0}: Error finding container fa318fbbdb94799592d0cbee3b0b7e9f26d64b8c5a45215890dfa0b3a2d90a82: Status 404 returned error can't find the container with id fa318fbbdb94799592d0cbee3b0b7e9f26d64b8c5a45215890dfa0b3a2d90a82 Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.913400 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc573acb-eee1-4849-967f-fd1b253b640f" (UID: "cc573acb-eee1-4849-967f-fd1b253b640f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.922411 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmmlb\" (UniqueName: \"kubernetes.io/projected/cc573acb-eee1-4849-967f-fd1b253b640f-kube-api-access-gmmlb\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.922451 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:18 crc kubenswrapper[5045]: I1125 23:05:18.922465 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc573acb-eee1-4849-967f-fd1b253b640f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.102969 5045 generic.go:334] "Generic (PLEG): container finished" podID="571187eb-51e5-40d8-83b3-2295535de7e6" containerID="2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf" exitCode=0 Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.103039 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h2rl5" event={"ID":"571187eb-51e5-40d8-83b3-2295535de7e6","Type":"ContainerDied","Data":"2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.103421 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h2rl5" event={"ID":"571187eb-51e5-40d8-83b3-2295535de7e6","Type":"ContainerDied","Data":"12edf3cf9ee94af7ea6c8ef1083c601ff9fbc763637110cb319f3142495a3e98"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.103109 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h2rl5" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.103460 5045 scope.go:117] "RemoveContainer" containerID="2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.107207 5045 generic.go:334] "Generic (PLEG): container finished" podID="cc573acb-eee1-4849-967f-fd1b253b640f" containerID="edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5" exitCode=0 Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.107321 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cntjg" event={"ID":"cc573acb-eee1-4849-967f-fd1b253b640f","Type":"ContainerDied","Data":"edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.107390 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cntjg" event={"ID":"cc573acb-eee1-4849-967f-fd1b253b640f","Type":"ContainerDied","Data":"567ba330f8db8d29c8acde5d829c747e099f99bd9d9393ef57e4b40b489bea07"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.107467 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cntjg" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.112494 5045 generic.go:334] "Generic (PLEG): container finished" podID="a7129135-79a3-478d-9ae4-78f7fe46280f" containerID="5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf" exitCode=0 Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.112588 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" event={"ID":"a7129135-79a3-478d-9ae4-78f7fe46280f","Type":"ContainerDied","Data":"5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.112619 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" event={"ID":"a7129135-79a3-478d-9ae4-78f7fe46280f","Type":"ContainerDied","Data":"017deabd0e828df39fee692cb5a76ecf099b5214a17e4148e69a2bb6b4d1bd85"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.112697 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-84zml" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.124838 5045 generic.go:334] "Generic (PLEG): container finished" podID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerID="5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27" exitCode=0 Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.124958 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzn6m" event={"ID":"00bdfdd4-092b-4071-87c1-fb9386f7114e","Type":"ContainerDied","Data":"5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.124987 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzn6m" event={"ID":"00bdfdd4-092b-4071-87c1-fb9386f7114e","Type":"ContainerDied","Data":"e3da41be3de5e434fef5de3319e89b618c0057cef73c38c392664b37cc914732"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.125005 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wzn6m" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.127998 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" event={"ID":"292cc94b-5ed6-4491-8168-1ac68858f418","Type":"ContainerStarted","Data":"de755bbee96f28b9c8103788c9054775a082fca22d8d8e030db8dc7470587a01"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.128027 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" event={"ID":"292cc94b-5ed6-4491-8168-1ac68858f418","Type":"ContainerStarted","Data":"fa318fbbdb94799592d0cbee3b0b7e9f26d64b8c5a45215890dfa0b3a2d90a82"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.129112 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.131040 5045 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-f5g5t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/healthz\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.131091 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" podUID="292cc94b-5ed6-4491-8168-1ac68858f418" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.66:8080/healthz\": dial tcp 10.217.0.66:8080: connect: connection refused" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.132907 5045 generic.go:334] "Generic (PLEG): container finished" podID="eb680718-f140-4525-950a-980e0dc1ed87" containerID="93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1" exitCode=0 Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.132952 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-frnbt" event={"ID":"eb680718-f140-4525-950a-980e0dc1ed87","Type":"ContainerDied","Data":"93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.132980 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-frnbt" event={"ID":"eb680718-f140-4525-950a-980e0dc1ed87","Type":"ContainerDied","Data":"b7a7c10bad72e3079ea5e6aa6a2fb635bbb35dd8b4a4584c4ae6ac12ec1c3d67"} Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.133048 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-frnbt" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.147434 5045 scope.go:117] "RemoveContainer" containerID="b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.165748 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" podStartSLOduration=1.165700003 podStartE2EDuration="1.165700003s" podCreationTimestamp="2025-11-25 23:05:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:05:19.162616901 +0000 UTC m=+375.520276023" watchObservedRunningTime="2025-11-25 23:05:19.165700003 +0000 UTC m=+375.523359115" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.193880 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h2rl5"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.206703 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-h2rl5"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.216247 5045 scope.go:117] "RemoveContainer" containerID="ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.230322 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-frnbt"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.234589 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-frnbt"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.237834 5045 scope.go:117] "RemoveContainer" containerID="2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.238451 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf\": container with ID starting with 2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf not found: ID does not exist" containerID="2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.238486 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf"} err="failed to get container status \"2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf\": rpc error: code = NotFound desc = could not find container \"2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf\": container with ID starting with 2ed655826a97f244eed78bfe0f91c46fc389f9e48e654dd8539a61ec52c90fdf not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.238513 5045 scope.go:117] "RemoveContainer" containerID="b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.240256 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac\": container with ID starting with b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac not found: ID does not exist" 
containerID="b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.240288 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac"} err="failed to get container status \"b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac\": rpc error: code = NotFound desc = could not find container \"b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac\": container with ID starting with b9a2a865d4409d016ee3770e3b5eacd69f918af1c6ae25ade9d8c1dfe59e8eac not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.240310 5045 scope.go:117] "RemoveContainer" containerID="ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.241704 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cntjg"] Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.242484 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c\": container with ID starting with ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c not found: ID does not exist" containerID="ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.242510 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c"} err="failed to get container status \"ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c\": rpc error: code = NotFound desc = could not find container \"ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c\": container with ID starting with ea1d85f49b121331753b72dbf238889ed764621a9a5031644e4459b300c5b18c not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.242527 5045 scope.go:117] "RemoveContainer" containerID="edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.257788 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cntjg"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.257842 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-84zml"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.261959 5045 scope.go:117] "RemoveContainer" containerID="bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.268808 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-84zml"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.276560 5045 scope.go:117] "RemoveContainer" containerID="762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.282498 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wzn6m"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.287956 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wzn6m"] Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 
23:05:19.288683 5045 scope.go:117] "RemoveContainer" containerID="edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.289619 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5\": container with ID starting with edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5 not found: ID does not exist" containerID="edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.289648 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5"} err="failed to get container status \"edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5\": rpc error: code = NotFound desc = could not find container \"edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5\": container with ID starting with edcf934a61d1c587508cb870d80b46998eb1e05060cd6ae873f684440ca755a5 not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.289670 5045 scope.go:117] "RemoveContainer" containerID="bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.289933 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5\": container with ID starting with bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5 not found: ID does not exist" containerID="bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.289957 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5"} err="failed to get container status \"bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5\": rpc error: code = NotFound desc = could not find container \"bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5\": container with ID starting with bdc33d5726221e795b79464ad5a342b34c9cdc4a40d800233cb7502c541369d5 not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.289973 5045 scope.go:117] "RemoveContainer" containerID="762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.290142 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9\": container with ID starting with 762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9 not found: ID does not exist" containerID="762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.290165 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9"} err="failed to get container status \"762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9\": rpc error: code = NotFound desc = could not find container \"762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9\": container with ID 
starting with 762c86fa1d63c0fe0e947e795752af72f7c4ec4c82e84747e291273958c822b9 not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.290180 5045 scope.go:117] "RemoveContainer" containerID="5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.306763 5045 scope.go:117] "RemoveContainer" containerID="5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.307070 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf\": container with ID starting with 5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf not found: ID does not exist" containerID="5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.307110 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf"} err="failed to get container status \"5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf\": rpc error: code = NotFound desc = could not find container \"5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf\": container with ID starting with 5cac7e5395874d502d01520f2c84a3cf3d2a1ed938361e835d66e662333c6ecf not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.307139 5045 scope.go:117] "RemoveContainer" containerID="5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.327473 5045 scope.go:117] "RemoveContainer" containerID="104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.343964 5045 scope.go:117] "RemoveContainer" containerID="b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.361109 5045 scope.go:117] "RemoveContainer" containerID="5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.361459 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27\": container with ID starting with 5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27 not found: ID does not exist" containerID="5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.361487 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27"} err="failed to get container status \"5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27\": rpc error: code = NotFound desc = could not find container \"5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27\": container with ID starting with 5ca1956ce6e1bd8cf01a60299f45ea23e286a1355dab057d963ec532b4661c27 not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.361509 5045 scope.go:117] "RemoveContainer" containerID="104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.362427 5045 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757\": container with ID starting with 104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757 not found: ID does not exist" containerID="104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.362454 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757"} err="failed to get container status \"104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757\": rpc error: code = NotFound desc = could not find container \"104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757\": container with ID starting with 104e6e4721f788ddba35f592c82c8dc3ca1a26bc6ee0590788dfcccc7686f757 not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.362473 5045 scope.go:117] "RemoveContainer" containerID="b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.364423 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531\": container with ID starting with b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531 not found: ID does not exist" containerID="b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.364455 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531"} err="failed to get container status \"b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531\": rpc error: code = NotFound desc = could not find container \"b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531\": container with ID starting with b8c96a9c7dc8a8fda82c3dcee889c49a0352ff067cc415a6ad6d168e67059531 not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.364474 5045 scope.go:117] "RemoveContainer" containerID="93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.378577 5045 scope.go:117] "RemoveContainer" containerID="b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.390922 5045 scope.go:117] "RemoveContainer" containerID="3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.402944 5045 scope.go:117] "RemoveContainer" containerID="93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.403245 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1\": container with ID starting with 93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1 not found: ID does not exist" containerID="93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.403268 5045 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1"} err="failed to get container status \"93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1\": rpc error: code = NotFound desc = could not find container \"93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1\": container with ID starting with 93d284e10f6e955c17fc17d5b29443358521e91246251ddd51c9c21301f684d1 not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.403293 5045 scope.go:117] "RemoveContainer" containerID="b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.403548 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564\": container with ID starting with b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564 not found: ID does not exist" containerID="b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.403565 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564"} err="failed to get container status \"b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564\": rpc error: code = NotFound desc = could not find container \"b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564\": container with ID starting with b7aa1e179987a81f2674d757af38de20e9a73d204c212481f0ccfa87df3b1564 not found: ID does not exist" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.403578 5045 scope.go:117] "RemoveContainer" containerID="3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa" Nov 25 23:05:19 crc kubenswrapper[5045]: E1125 23:05:19.403809 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa\": container with ID starting with 3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa not found: ID does not exist" containerID="3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa" Nov 25 23:05:19 crc kubenswrapper[5045]: I1125 23:05:19.403829 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa"} err="failed to get container status \"3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa\": rpc error: code = NotFound desc = could not find container \"3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa\": container with ID starting with 3d7aa5838756709b9c2c8addaef9ce28e2588f4b3836d472ff987f246b7a44fa not found: ID does not exist" Nov 25 23:05:20 crc kubenswrapper[5045]: I1125 23:05:20.145276 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-f5g5t" Nov 25 23:05:20 crc kubenswrapper[5045]: I1125 23:05:20.402793 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" path="/var/lib/kubelet/pods/00bdfdd4-092b-4071-87c1-fb9386f7114e/volumes" Nov 25 23:05:20 crc kubenswrapper[5045]: I1125 23:05:20.403397 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="571187eb-51e5-40d8-83b3-2295535de7e6" path="/var/lib/kubelet/pods/571187eb-51e5-40d8-83b3-2295535de7e6/volumes" Nov 25 23:05:20 crc kubenswrapper[5045]: I1125 23:05:20.403990 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7129135-79a3-478d-9ae4-78f7fe46280f" path="/var/lib/kubelet/pods/a7129135-79a3-478d-9ae4-78f7fe46280f/volumes" Nov 25 23:05:20 crc kubenswrapper[5045]: I1125 23:05:20.404868 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" path="/var/lib/kubelet/pods/cc573acb-eee1-4849-967f-fd1b253b640f/volumes" Nov 25 23:05:20 crc kubenswrapper[5045]: I1125 23:05:20.405393 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb680718-f140-4525-950a-980e0dc1ed87" path="/var/lib/kubelet/pods/eb680718-f140-4525-950a-980e0dc1ed87/volumes" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.640757 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bnp6n"] Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641306 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" containerName="extract-utilities" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641323 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" containerName="extract-utilities" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641335 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7129135-79a3-478d-9ae4-78f7fe46280f" containerName="marketplace-operator" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641344 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7129135-79a3-478d-9ae4-78f7fe46280f" containerName="marketplace-operator" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641361 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb680718-f140-4525-950a-980e0dc1ed87" containerName="extract-content" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641369 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb680718-f140-4525-950a-980e0dc1ed87" containerName="extract-content" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641379 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641386 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641397 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571187eb-51e5-40d8-83b3-2295535de7e6" containerName="extract-content" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641405 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="571187eb-51e5-40d8-83b3-2295535de7e6" containerName="extract-content" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641416 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb680718-f140-4525-950a-980e0dc1ed87" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641424 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb680718-f140-4525-950a-980e0dc1ed87" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641437 5045 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641444 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641454 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571187eb-51e5-40d8-83b3-2295535de7e6" containerName="extract-utilities" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641461 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="571187eb-51e5-40d8-83b3-2295535de7e6" containerName="extract-utilities" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641472 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerName="extract-content" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641480 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerName="extract-content" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641495 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb680718-f140-4525-950a-980e0dc1ed87" containerName="extract-utilities" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641502 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb680718-f140-4525-950a-980e0dc1ed87" containerName="extract-utilities" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641512 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerName="extract-utilities" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641519 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerName="extract-utilities" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641563 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" containerName="extract-content" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641572 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" containerName="extract-content" Nov 25 23:05:24 crc kubenswrapper[5045]: E1125 23:05:24.641617 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571187eb-51e5-40d8-83b3-2295535de7e6" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641626 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="571187eb-51e5-40d8-83b3-2295535de7e6" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641774 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7129135-79a3-478d-9ae4-78f7fe46280f" containerName="marketplace-operator" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641789 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb680718-f140-4525-950a-980e0dc1ed87" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641800 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="571187eb-51e5-40d8-83b3-2295535de7e6" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641811 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="00bdfdd4-092b-4071-87c1-fb9386f7114e" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.641822 
5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc573acb-eee1-4849-967f-fd1b253b640f" containerName="registry-server" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.642560 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.644786 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.648580 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnp6n"] Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.811928 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3913a533-c2e2-4737-b52e-b90f29f979a5-utilities\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.812038 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3913a533-c2e2-4737-b52e-b90f29f979a5-catalog-content\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.812273 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjzbt\" (UniqueName: \"kubernetes.io/projected/3913a533-c2e2-4737-b52e-b90f29f979a5-kube-api-access-fjzbt\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.836394 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9k62x"] Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.839170 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.840498 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9k62x"] Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.841438 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.913825 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3913a533-c2e2-4737-b52e-b90f29f979a5-utilities\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.913888 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3913a533-c2e2-4737-b52e-b90f29f979a5-catalog-content\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.914008 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjzbt\" (UniqueName: \"kubernetes.io/projected/3913a533-c2e2-4737-b52e-b90f29f979a5-kube-api-access-fjzbt\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.914410 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3913a533-c2e2-4737-b52e-b90f29f979a5-catalog-content\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.914724 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3913a533-c2e2-4737-b52e-b90f29f979a5-utilities\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:24 crc kubenswrapper[5045]: I1125 23:05:24.934150 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjzbt\" (UniqueName: \"kubernetes.io/projected/3913a533-c2e2-4737-b52e-b90f29f979a5-kube-api-access-fjzbt\") pod \"redhat-marketplace-bnp6n\" (UID: \"3913a533-c2e2-4737-b52e-b90f29f979a5\") " pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.016026 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c53fe041-30c1-448d-9eaa-1db8e0163b83-utilities\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") " pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.016343 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c53fe041-30c1-448d-9eaa-1db8e0163b83-catalog-content\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") 
" pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.016388 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbp9r\" (UniqueName: \"kubernetes.io/projected/c53fe041-30c1-448d-9eaa-1db8e0163b83-kube-api-access-tbp9r\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") " pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.018699 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.118531 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbp9r\" (UniqueName: \"kubernetes.io/projected/c53fe041-30c1-448d-9eaa-1db8e0163b83-kube-api-access-tbp9r\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") " pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.118610 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c53fe041-30c1-448d-9eaa-1db8e0163b83-utilities\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") " pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.118657 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c53fe041-30c1-448d-9eaa-1db8e0163b83-catalog-content\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") " pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.119226 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c53fe041-30c1-448d-9eaa-1db8e0163b83-utilities\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") " pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.119297 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c53fe041-30c1-448d-9eaa-1db8e0163b83-catalog-content\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") " pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.146782 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbp9r\" (UniqueName: \"kubernetes.io/projected/c53fe041-30c1-448d-9eaa-1db8e0163b83-kube-api-access-tbp9r\") pod \"certified-operators-9k62x\" (UID: \"c53fe041-30c1-448d-9eaa-1db8e0163b83\") " pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.154337 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.439257 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnp6n"] Nov 25 23:05:25 crc kubenswrapper[5045]: I1125 23:05:25.549268 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9k62x"] Nov 25 23:05:25 crc kubenswrapper[5045]: W1125 23:05:25.574088 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3913a533_c2e2_4737_b52e_b90f29f979a5.slice/crio-f3a15ac2604b48d2a04d7da4a1d750b1edd422a9c21d9a2ee8a7daa4e6f457f8 WatchSource:0}: Error finding container f3a15ac2604b48d2a04d7da4a1d750b1edd422a9c21d9a2ee8a7daa4e6f457f8: Status 404 returned error can't find the container with id f3a15ac2604b48d2a04d7da4a1d750b1edd422a9c21d9a2ee8a7daa4e6f457f8 Nov 25 23:05:25 crc kubenswrapper[5045]: W1125 23:05:25.574678 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc53fe041_30c1_448d_9eaa_1db8e0163b83.slice/crio-c5b91578aad42b0843b11e279ebf60c0cad992a66aba90f3b0a2f558d1fc25ec WatchSource:0}: Error finding container c5b91578aad42b0843b11e279ebf60c0cad992a66aba90f3b0a2f558d1fc25ec: Status 404 returned error can't find the container with id c5b91578aad42b0843b11e279ebf60c0cad992a66aba90f3b0a2f558d1fc25ec Nov 25 23:05:26 crc kubenswrapper[5045]: I1125 23:05:26.202758 5045 generic.go:334] "Generic (PLEG): container finished" podID="3913a533-c2e2-4737-b52e-b90f29f979a5" containerID="8fa8ecb73e6366ee6bd8fa7fa407f8062a913ce888f9bcebf2e08d8ec12ae464" exitCode=0 Nov 25 23:05:26 crc kubenswrapper[5045]: I1125 23:05:26.202872 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnp6n" event={"ID":"3913a533-c2e2-4737-b52e-b90f29f979a5","Type":"ContainerDied","Data":"8fa8ecb73e6366ee6bd8fa7fa407f8062a913ce888f9bcebf2e08d8ec12ae464"} Nov 25 23:05:26 crc kubenswrapper[5045]: I1125 23:05:26.203256 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnp6n" event={"ID":"3913a533-c2e2-4737-b52e-b90f29f979a5","Type":"ContainerStarted","Data":"f3a15ac2604b48d2a04d7da4a1d750b1edd422a9c21d9a2ee8a7daa4e6f457f8"} Nov 25 23:05:26 crc kubenswrapper[5045]: I1125 23:05:26.208811 5045 generic.go:334] "Generic (PLEG): container finished" podID="c53fe041-30c1-448d-9eaa-1db8e0163b83" containerID="4c45d604a46fb928f1263c64fe5513704c1f7ae758b80d6f21394e29ad5f56cd" exitCode=0 Nov 25 23:05:26 crc kubenswrapper[5045]: I1125 23:05:26.208953 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9k62x" event={"ID":"c53fe041-30c1-448d-9eaa-1db8e0163b83","Type":"ContainerDied","Data":"4c45d604a46fb928f1263c64fe5513704c1f7ae758b80d6f21394e29ad5f56cd"} Nov 25 23:05:26 crc kubenswrapper[5045]: I1125 23:05:26.209000 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9k62x" event={"ID":"c53fe041-30c1-448d-9eaa-1db8e0163b83","Type":"ContainerStarted","Data":"c5b91578aad42b0843b11e279ebf60c0cad992a66aba90f3b0a2f558d1fc25ec"} Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.030374 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4v65k"] Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.031805 5045 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.035724 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.057174 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4v65k"] Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.144525 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78a1fdd9-da9e-445f-9d75-167eff9d37a9-utilities\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.144592 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gvnf\" (UniqueName: \"kubernetes.io/projected/78a1fdd9-da9e-445f-9d75-167eff9d37a9-kube-api-access-5gvnf\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.144634 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78a1fdd9-da9e-445f-9d75-167eff9d37a9-catalog-content\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.234802 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bqxg8"] Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.239099 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.240963 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.241127 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bqxg8"] Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.245496 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78a1fdd9-da9e-445f-9d75-167eff9d37a9-utilities\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.245561 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gvnf\" (UniqueName: \"kubernetes.io/projected/78a1fdd9-da9e-445f-9d75-167eff9d37a9-kube-api-access-5gvnf\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.245597 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78a1fdd9-da9e-445f-9d75-167eff9d37a9-catalog-content\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.246091 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78a1fdd9-da9e-445f-9d75-167eff9d37a9-catalog-content\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.246621 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78a1fdd9-da9e-445f-9d75-167eff9d37a9-utilities\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.275306 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gvnf\" (UniqueName: \"kubernetes.io/projected/78a1fdd9-da9e-445f-9d75-167eff9d37a9-kube-api-access-5gvnf\") pod \"community-operators-4v65k\" (UID: \"78a1fdd9-da9e-445f-9d75-167eff9d37a9\") " pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.284621 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4v65k" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.347252 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fc579e4-208c-4708-aa65-f7b2262eada1-catalog-content\") pod \"redhat-operators-bqxg8\" (UID: \"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.347567 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtwl6\" (UniqueName: \"kubernetes.io/projected/6fc579e4-208c-4708-aa65-f7b2262eada1-kube-api-access-qtwl6\") pod \"redhat-operators-bqxg8\" (UID: \"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.347650 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fc579e4-208c-4708-aa65-f7b2262eada1-utilities\") pod \"redhat-operators-bqxg8\" (UID: \"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.451014 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fc579e4-208c-4708-aa65-f7b2262eada1-catalog-content\") pod \"redhat-operators-bqxg8\" (UID: \"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.451132 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtwl6\" (UniqueName: \"kubernetes.io/projected/6fc579e4-208c-4708-aa65-f7b2262eada1-kube-api-access-qtwl6\") pod \"redhat-operators-bqxg8\" (UID: \"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.451338 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fc579e4-208c-4708-aa65-f7b2262eada1-utilities\") pod \"redhat-operators-bqxg8\" (UID: \"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.451471 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fc579e4-208c-4708-aa65-f7b2262eada1-catalog-content\") pod \"redhat-operators-bqxg8\" (UID: \"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.451733 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fc579e4-208c-4708-aa65-f7b2262eada1-utilities\") pod \"redhat-operators-bqxg8\" (UID: \"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.476521 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtwl6\" (UniqueName: \"kubernetes.io/projected/6fc579e4-208c-4708-aa65-f7b2262eada1-kube-api-access-qtwl6\") pod \"redhat-operators-bqxg8\" (UID: 
\"6fc579e4-208c-4708-aa65-f7b2262eada1\") " pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.480307 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4v65k"] Nov 25 23:05:27 crc kubenswrapper[5045]: W1125 23:05:27.523046 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78a1fdd9_da9e_445f_9d75_167eff9d37a9.slice/crio-d1a557c8b7c8d6c93491f887c9608b571f52dbc96180e66bd5985aec0d743ae4 WatchSource:0}: Error finding container d1a557c8b7c8d6c93491f887c9608b571f52dbc96180e66bd5985aec0d743ae4: Status 404 returned error can't find the container with id d1a557c8b7c8d6c93491f887c9608b571f52dbc96180e66bd5985aec0d743ae4 Nov 25 23:05:27 crc kubenswrapper[5045]: I1125 23:05:27.623461 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bqxg8" Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.003700 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bqxg8"] Nov 25 23:05:28 crc kubenswrapper[5045]: W1125 23:05:28.010986 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fc579e4_208c_4708_aa65_f7b2262eada1.slice/crio-057561964d01a9234e5809bc46cbbd2305ddda9c1724099a147e00a9ce9fd0c2 WatchSource:0}: Error finding container 057561964d01a9234e5809bc46cbbd2305ddda9c1724099a147e00a9ce9fd0c2: Status 404 returned error can't find the container with id 057561964d01a9234e5809bc46cbbd2305ddda9c1724099a147e00a9ce9fd0c2 Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.220275 5045 generic.go:334] "Generic (PLEG): container finished" podID="3913a533-c2e2-4737-b52e-b90f29f979a5" containerID="3c89731d3ae6232f05a6a617875e04c611d5a6aea3aeb7a4b420e00b8c5e492b" exitCode=0 Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.220349 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnp6n" event={"ID":"3913a533-c2e2-4737-b52e-b90f29f979a5","Type":"ContainerDied","Data":"3c89731d3ae6232f05a6a617875e04c611d5a6aea3aeb7a4b420e00b8c5e492b"} Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.221560 5045 generic.go:334] "Generic (PLEG): container finished" podID="78a1fdd9-da9e-445f-9d75-167eff9d37a9" containerID="3edc9e21e660feaa62dc5399be8d8b54fd3860920aa0bbdf8f0f8fcd4868d50d" exitCode=0 Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.221598 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4v65k" event={"ID":"78a1fdd9-da9e-445f-9d75-167eff9d37a9","Type":"ContainerDied","Data":"3edc9e21e660feaa62dc5399be8d8b54fd3860920aa0bbdf8f0f8fcd4868d50d"} Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.221613 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4v65k" event={"ID":"78a1fdd9-da9e-445f-9d75-167eff9d37a9","Type":"ContainerStarted","Data":"d1a557c8b7c8d6c93491f887c9608b571f52dbc96180e66bd5985aec0d743ae4"} Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.224766 5045 generic.go:334] "Generic (PLEG): container finished" podID="c53fe041-30c1-448d-9eaa-1db8e0163b83" containerID="9d571db3955ae65b4b51afe8d96aa7cf50cdab77e6867a97ac489dee820db33c" exitCode=0 Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.224803 5045 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/certified-operators-9k62x" event={"ID":"c53fe041-30c1-448d-9eaa-1db8e0163b83","Type":"ContainerDied","Data":"9d571db3955ae65b4b51afe8d96aa7cf50cdab77e6867a97ac489dee820db33c"} Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.226960 5045 generic.go:334] "Generic (PLEG): container finished" podID="6fc579e4-208c-4708-aa65-f7b2262eada1" containerID="48d060f5a9488abbec39af0210618e920f745658d173e985ddb229959330fa7a" exitCode=0 Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.226984 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqxg8" event={"ID":"6fc579e4-208c-4708-aa65-f7b2262eada1","Type":"ContainerDied","Data":"48d060f5a9488abbec39af0210618e920f745658d173e985ddb229959330fa7a"} Nov 25 23:05:28 crc kubenswrapper[5045]: I1125 23:05:28.226999 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqxg8" event={"ID":"6fc579e4-208c-4708-aa65-f7b2262eada1","Type":"ContainerStarted","Data":"057561964d01a9234e5809bc46cbbd2305ddda9c1724099a147e00a9ce9fd0c2"} Nov 25 23:05:29 crc kubenswrapper[5045]: I1125 23:05:29.255220 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9k62x" event={"ID":"c53fe041-30c1-448d-9eaa-1db8e0163b83","Type":"ContainerStarted","Data":"cce0982bcf0a2962082a7978e272b4f5731d67c6273a3d77b4bc918151f07a1e"} Nov 25 23:05:29 crc kubenswrapper[5045]: I1125 23:05:29.257924 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqxg8" event={"ID":"6fc579e4-208c-4708-aa65-f7b2262eada1","Type":"ContainerStarted","Data":"2739f6d27d6c70eb13efdbe66232a00a9817e776bd5bb9a4b85d7a5472cc37de"} Nov 25 23:05:29 crc kubenswrapper[5045]: I1125 23:05:29.260399 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnp6n" event={"ID":"3913a533-c2e2-4737-b52e-b90f29f979a5","Type":"ContainerStarted","Data":"71788c3b35911b655df1378a11a34187940d48f6d43f645b78249a4ba59dac06"} Nov 25 23:05:29 crc kubenswrapper[5045]: I1125 23:05:29.262364 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4v65k" event={"ID":"78a1fdd9-da9e-445f-9d75-167eff9d37a9","Type":"ContainerStarted","Data":"a7c18f0621a4a74091ccb2a1b4c8fa93576f84505380e6803e4132c2160b7c2b"} Nov 25 23:05:29 crc kubenswrapper[5045]: I1125 23:05:29.288849 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9k62x" podStartSLOduration=2.631070214 podStartE2EDuration="5.288835259s" podCreationTimestamp="2025-11-25 23:05:24 +0000 UTC" firstStartedPulling="2025-11-25 23:05:26.211256052 +0000 UTC m=+382.568915164" lastFinishedPulling="2025-11-25 23:05:28.869021067 +0000 UTC m=+385.226680209" observedRunningTime="2025-11-25 23:05:29.286098328 +0000 UTC m=+385.643757430" watchObservedRunningTime="2025-11-25 23:05:29.288835259 +0000 UTC m=+385.646494371" Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.270431 5045 generic.go:334] "Generic (PLEG): container finished" podID="78a1fdd9-da9e-445f-9d75-167eff9d37a9" containerID="a7c18f0621a4a74091ccb2a1b4c8fa93576f84505380e6803e4132c2160b7c2b" exitCode=0 Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.270471 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4v65k" 
event={"ID":"78a1fdd9-da9e-445f-9d75-167eff9d37a9","Type":"ContainerDied","Data":"a7c18f0621a4a74091ccb2a1b4c8fa93576f84505380e6803e4132c2160b7c2b"} Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.275089 5045 generic.go:334] "Generic (PLEG): container finished" podID="6fc579e4-208c-4708-aa65-f7b2262eada1" containerID="2739f6d27d6c70eb13efdbe66232a00a9817e776bd5bb9a4b85d7a5472cc37de" exitCode=0 Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.275148 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqxg8" event={"ID":"6fc579e4-208c-4708-aa65-f7b2262eada1","Type":"ContainerDied","Data":"2739f6d27d6c70eb13efdbe66232a00a9817e776bd5bb9a4b85d7a5472cc37de"} Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.322452 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bnp6n" podStartSLOduration=3.455316451 podStartE2EDuration="6.322438116s" podCreationTimestamp="2025-11-25 23:05:24 +0000 UTC" firstStartedPulling="2025-11-25 23:05:26.20748615 +0000 UTC m=+382.565145302" lastFinishedPulling="2025-11-25 23:05:29.074607855 +0000 UTC m=+385.432266967" observedRunningTime="2025-11-25 23:05:30.319242771 +0000 UTC m=+386.676901883" watchObservedRunningTime="2025-11-25 23:05:30.322438116 +0000 UTC m=+386.680097228" Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.540769 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.540831 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.540877 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.541453 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"43b4ed36c05a672d0150875c65cb7d95bf353277109bd9005a037fea7220d422"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:05:30 crc kubenswrapper[5045]: I1125 23:05:30.541519 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://43b4ed36c05a672d0150875c65cb7d95bf353277109bd9005a037fea7220d422" gracePeriod=600 Nov 25 23:05:31 crc kubenswrapper[5045]: I1125 23:05:31.295110 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqxg8" event={"ID":"6fc579e4-208c-4708-aa65-f7b2262eada1","Type":"ContainerStarted","Data":"ed02c6356f5eef9195b5b9812872e578850fde539ea17dd9e5d060b11005d8f5"} Nov 25 23:05:31 crc kubenswrapper[5045]: I1125 23:05:31.298463 5045 generic.go:334] "Generic (PLEG): 
container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="43b4ed36c05a672d0150875c65cb7d95bf353277109bd9005a037fea7220d422" exitCode=0 Nov 25 23:05:31 crc kubenswrapper[5045]: I1125 23:05:31.298520 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"43b4ed36c05a672d0150875c65cb7d95bf353277109bd9005a037fea7220d422"} Nov 25 23:05:31 crc kubenswrapper[5045]: I1125 23:05:31.298543 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"cc90319f958b5f9648cd5e926ff20552a29d869f3d08dacc8d4b7c3206f610aa"} Nov 25 23:05:31 crc kubenswrapper[5045]: I1125 23:05:31.298561 5045 scope.go:117] "RemoveContainer" containerID="fcf2e484f9d7666360f1c60b39ae46c76d0d0a2edc2a676f694f132eff81fd5c" Nov 25 23:05:31 crc kubenswrapper[5045]: I1125 23:05:31.303314 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4v65k" event={"ID":"78a1fdd9-da9e-445f-9d75-167eff9d37a9","Type":"ContainerStarted","Data":"50b90af4348da5f4ab4641879cdfe0a7e7fb1bab8dcb27879f55742fd092194f"} Nov 25 23:05:31 crc kubenswrapper[5045]: I1125 23:05:31.314385 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bqxg8" podStartSLOduration=1.809893016 podStartE2EDuration="4.314367271s" podCreationTimestamp="2025-11-25 23:05:27 +0000 UTC" firstStartedPulling="2025-11-25 23:05:28.227780206 +0000 UTC m=+384.585439318" lastFinishedPulling="2025-11-25 23:05:30.732254461 +0000 UTC m=+387.089913573" observedRunningTime="2025-11-25 23:05:31.310698822 +0000 UTC m=+387.668357934" watchObservedRunningTime="2025-11-25 23:05:31.314367271 +0000 UTC m=+387.672026393" Nov 25 23:05:31 crc kubenswrapper[5045]: I1125 23:05:31.328264 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4v65k" podStartSLOduration=1.808212277 podStartE2EDuration="4.328248134s" podCreationTimestamp="2025-11-25 23:05:27 +0000 UTC" firstStartedPulling="2025-11-25 23:05:28.225955202 +0000 UTC m=+384.583614314" lastFinishedPulling="2025-11-25 23:05:30.745991059 +0000 UTC m=+387.103650171" observedRunningTime="2025-11-25 23:05:31.324832213 +0000 UTC m=+387.682491345" watchObservedRunningTime="2025-11-25 23:05:31.328248134 +0000 UTC m=+387.685907246" Nov 25 23:05:35 crc kubenswrapper[5045]: I1125 23:05:35.019096 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:35 crc kubenswrapper[5045]: I1125 23:05:35.019563 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:35 crc kubenswrapper[5045]: I1125 23:05:35.100828 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:35 crc kubenswrapper[5045]: I1125 23:05:35.155986 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:35 crc kubenswrapper[5045]: I1125 23:05:35.156037 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:35 
crc kubenswrapper[5045]: I1125 23:05:35.207466 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:35 crc kubenswrapper[5045]: I1125 23:05:35.378423 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bnp6n" Nov 25 23:05:35 crc kubenswrapper[5045]: I1125 23:05:35.380402 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9k62x" Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.330208 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" podUID="0e953287-8cf8-4561-8a48-731746910551" containerName="registry" containerID="cri-o://16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d" gracePeriod=30 Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.799937 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.977913 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-bound-sa-token\") pod \"0e953287-8cf8-4561-8a48-731746910551\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.978001 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-trusted-ca\") pod \"0e953287-8cf8-4561-8a48-731746910551\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.978061 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-registry-tls\") pod \"0e953287-8cf8-4561-8a48-731746910551\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.978137 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52r6g\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-kube-api-access-52r6g\") pod \"0e953287-8cf8-4561-8a48-731746910551\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.978250 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e953287-8cf8-4561-8a48-731746910551-installation-pull-secrets\") pod \"0e953287-8cf8-4561-8a48-731746910551\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.978550 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"0e953287-8cf8-4561-8a48-731746910551\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.978622 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-registry-certificates\") pod \"0e953287-8cf8-4561-8a48-731746910551\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.978674 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e953287-8cf8-4561-8a48-731746910551-ca-trust-extracted\") pod \"0e953287-8cf8-4561-8a48-731746910551\" (UID: \"0e953287-8cf8-4561-8a48-731746910551\") " Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.979073 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "0e953287-8cf8-4561-8a48-731746910551" (UID: "0e953287-8cf8-4561-8a48-731746910551"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.981550 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "0e953287-8cf8-4561-8a48-731746910551" (UID: "0e953287-8cf8-4561-8a48-731746910551"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.985131 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-kube-api-access-52r6g" (OuterVolumeSpecName: "kube-api-access-52r6g") pod "0e953287-8cf8-4561-8a48-731746910551" (UID: "0e953287-8cf8-4561-8a48-731746910551"). InnerVolumeSpecName "kube-api-access-52r6g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.985380 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "0e953287-8cf8-4561-8a48-731746910551" (UID: "0e953287-8cf8-4561-8a48-731746910551"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.987733 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e953287-8cf8-4561-8a48-731746910551-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "0e953287-8cf8-4561-8a48-731746910551" (UID: "0e953287-8cf8-4561-8a48-731746910551"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.987942 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "0e953287-8cf8-4561-8a48-731746910551" (UID: "0e953287-8cf8-4561-8a48-731746910551"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:05:36 crc kubenswrapper[5045]: I1125 23:05:36.989488 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "0e953287-8cf8-4561-8a48-731746910551" (UID: "0e953287-8cf8-4561-8a48-731746910551"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.014746 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e953287-8cf8-4561-8a48-731746910551-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "0e953287-8cf8-4561-8a48-731746910551" (UID: "0e953287-8cf8-4561-8a48-731746910551"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.079753 5045 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.079803 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.079821 5045 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.079837 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52r6g\" (UniqueName: \"kubernetes.io/projected/0e953287-8cf8-4561-8a48-731746910551-kube-api-access-52r6g\") on node \"crc\" DevicePath \"\""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.079858 5045 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e953287-8cf8-4561-8a48-731746910551-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.079875 5045 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e953287-8cf8-4561-8a48-731746910551-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.079890 5045 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e953287-8cf8-4561-8a48-731746910551-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.285585 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4v65k"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.285644 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4v65k"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.326448 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4v65k"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.347906 5045 generic.go:334] "Generic (PLEG): container finished" podID="0e953287-8cf8-4561-8a48-731746910551" containerID="16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d" exitCode=0
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.348003 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" event={"ID":"0e953287-8cf8-4561-8a48-731746910551","Type":"ContainerDied","Data":"16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d"}
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.348048 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2" event={"ID":"0e953287-8cf8-4561-8a48-731746910551","Type":"ContainerDied","Data":"b15ff89dde89d6d5f718eb54d668496efd14e9a0b1a24691a8230650660614b4"}
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.348072 5045 scope.go:117] "RemoveContainer" containerID="16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.348896 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-9cmr2"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.371921 5045 scope.go:117] "RemoveContainer" containerID="16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.377473 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-9cmr2"]
Nov 25 23:05:37 crc kubenswrapper[5045]: E1125 23:05:37.378051 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d\": container with ID starting with 16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d not found: ID does not exist" containerID="16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.379487 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d"} err="failed to get container status \"16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d\": rpc error: code = NotFound desc = could not find container \"16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d\": container with ID starting with 16cc8bd97e8dda0121ee7fbbdd719f5371122781d9c97e5a08b99ad6a203874d not found: ID does not exist"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.382617 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-9cmr2"]
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.410824 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4v65k"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.624015 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bqxg8"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.624219 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bqxg8"
Nov 25 23:05:37 crc kubenswrapper[5045]: I1125 23:05:37.667994 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bqxg8"
Nov 25 23:05:38 crc kubenswrapper[5045]: I1125 23:05:38.406625 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e953287-8cf8-4561-8a48-731746910551" path="/var/lib/kubelet/pods/0e953287-8cf8-4561-8a48-731746910551/volumes"
Nov 25 23:05:38 crc kubenswrapper[5045]: I1125 23:05:38.407536 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bqxg8"
Nov 25 23:07:30 crc kubenswrapper[5045]: I1125 23:07:30.540761 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 23:07:30 crc kubenswrapper[5045]: I1125 23:07:30.541523 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 23:08:00 crc kubenswrapper[5045]: I1125 23:08:00.541138 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 23:08:00 crc kubenswrapper[5045]: I1125 23:08:00.541805 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 23:08:04 crc kubenswrapper[5045]: I1125 23:08:04.707283 5045 scope.go:117] "RemoveContainer" containerID="b60c7ea29e95069c84cf32da2b7cf1c9eed8397d9b2c08a4290f3bb798575976"
Nov 25 23:08:30 crc kubenswrapper[5045]: I1125 23:08:30.540959 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 23:08:30 crc kubenswrapper[5045]: I1125 23:08:30.541809 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 23:08:30 crc kubenswrapper[5045]: I1125 23:08:30.541874 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4"
Nov 25 23:08:30 crc kubenswrapper[5045]: I1125 23:08:30.542681 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cc90319f958b5f9648cd5e926ff20552a29d869f3d08dacc8d4b7c3206f610aa"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 23:08:30 crc kubenswrapper[5045]: I1125 23:08:30.542837 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://cc90319f958b5f9648cd5e926ff20552a29d869f3d08dacc8d4b7c3206f610aa" gracePeriod=600
Nov 25 23:08:31 crc kubenswrapper[5045]: I1125 23:08:31.560666 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="cc90319f958b5f9648cd5e926ff20552a29d869f3d08dacc8d4b7c3206f610aa" exitCode=0
Nov 25 23:08:31 crc kubenswrapper[5045]: I1125 23:08:31.560751 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"cc90319f958b5f9648cd5e926ff20552a29d869f3d08dacc8d4b7c3206f610aa"}
Nov 25 23:08:31 crc kubenswrapper[5045]: I1125 23:08:31.561396 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"b92b5d9b7e6348e5c2d32780968d464c8df43db35693b94adce9b088fa04e458"}
Nov 25 23:08:31 crc kubenswrapper[5045]: I1125 23:08:31.561432 5045 scope.go:117] "RemoveContainer" containerID="43b4ed36c05a672d0150875c65cb7d95bf353277109bd9005a037fea7220d422"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.001432 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-tj5h4"]
Nov 25 23:10:48 crc kubenswrapper[5045]: E1125 23:10:48.002122 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e953287-8cf8-4561-8a48-731746910551" containerName="registry"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.002135 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e953287-8cf8-4561-8a48-731746910551" containerName="registry"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.002255 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e953287-8cf8-4561-8a48-731746910551" containerName="registry"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.002679 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-tj5h4"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.005549 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-8rvmp"]
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.007681 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.008074 5045 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-bx8w4"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.008091 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-8rvmp"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.008319 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.010398 5045 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-5hsm5"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.028217 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-8rvmp"]
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.037246 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"]
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.038014 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.039464 5045 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-mggn6"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.053654 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"]
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.062656 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-tj5h4"]
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.074251 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gtcq\" (UniqueName: \"kubernetes.io/projected/581be194-4f18-4d0b-82fe-da014d72e03d-kube-api-access-9gtcq\") pod \"cert-manager-5b446d88c5-8rvmp\" (UID: \"581be194-4f18-4d0b-82fe-da014d72e03d\") " pod="cert-manager/cert-manager-5b446d88c5-8rvmp"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.074457 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfcdq\" (UniqueName: \"kubernetes.io/projected/ca7e1b0d-b147-46cb-9537-6026becd4866-kube-api-access-hfcdq\") pod \"cert-manager-cainjector-7f985d654d-tj5h4\" (UID: \"ca7e1b0d-b147-46cb-9537-6026becd4866\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-tj5h4"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.175814 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w76q9\" (UniqueName: \"kubernetes.io/projected/9f002a66-6d5b-49a1-881d-5ed4deb1a006-kube-api-access-w76q9\") pod \"cert-manager-webhook-5655c58dd6-lpn7b\" (UID: \"9f002a66-6d5b-49a1-881d-5ed4deb1a006\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.175890 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfcdq\" (UniqueName: \"kubernetes.io/projected/ca7e1b0d-b147-46cb-9537-6026becd4866-kube-api-access-hfcdq\") pod \"cert-manager-cainjector-7f985d654d-tj5h4\" (UID: \"ca7e1b0d-b147-46cb-9537-6026becd4866\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-tj5h4"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.175929 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gtcq\" (UniqueName: \"kubernetes.io/projected/581be194-4f18-4d0b-82fe-da014d72e03d-kube-api-access-9gtcq\") pod \"cert-manager-5b446d88c5-8rvmp\" (UID: \"581be194-4f18-4d0b-82fe-da014d72e03d\") " pod="cert-manager/cert-manager-5b446d88c5-8rvmp"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.195744 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfcdq\" (UniqueName: \"kubernetes.io/projected/ca7e1b0d-b147-46cb-9537-6026becd4866-kube-api-access-hfcdq\") pod \"cert-manager-cainjector-7f985d654d-tj5h4\" (UID: \"ca7e1b0d-b147-46cb-9537-6026becd4866\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-tj5h4"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.196121 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gtcq\" (UniqueName: \"kubernetes.io/projected/581be194-4f18-4d0b-82fe-da014d72e03d-kube-api-access-9gtcq\") pod \"cert-manager-5b446d88c5-8rvmp\" (UID: \"581be194-4f18-4d0b-82fe-da014d72e03d\") " pod="cert-manager/cert-manager-5b446d88c5-8rvmp"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.276691 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w76q9\" (UniqueName: \"kubernetes.io/projected/9f002a66-6d5b-49a1-881d-5ed4deb1a006-kube-api-access-w76q9\") pod \"cert-manager-webhook-5655c58dd6-lpn7b\" (UID: \"9f002a66-6d5b-49a1-881d-5ed4deb1a006\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.305270 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w76q9\" (UniqueName: \"kubernetes.io/projected/9f002a66-6d5b-49a1-881d-5ed4deb1a006-kube-api-access-w76q9\") pod \"cert-manager-webhook-5655c58dd6-lpn7b\" (UID: \"9f002a66-6d5b-49a1-881d-5ed4deb1a006\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.324879 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-tj5h4"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.334455 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-8rvmp"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.353678 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.657190 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"]
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.670764 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.814772 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-tj5h4"]
Nov 25 23:10:48 crc kubenswrapper[5045]: I1125 23:10:48.820253 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-8rvmp"]
Nov 25 23:10:48 crc kubenswrapper[5045]: W1125 23:10:48.827825 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca7e1b0d_b147_46cb_9537_6026becd4866.slice/crio-6d6ce286243c704305c6624af0d73c7b7464cb69d39e1cbfaf08c1630d2d86fe WatchSource:0}: Error finding container 6d6ce286243c704305c6624af0d73c7b7464cb69d39e1cbfaf08c1630d2d86fe: Status 404 returned error can't find the container with id 6d6ce286243c704305c6624af0d73c7b7464cb69d39e1cbfaf08c1630d2d86fe
Nov 25 23:10:49 crc kubenswrapper[5045]: I1125 23:10:49.465559 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-8rvmp" event={"ID":"581be194-4f18-4d0b-82fe-da014d72e03d","Type":"ContainerStarted","Data":"81e2fb0ea59fc6f28dc206b573b269bc1ffbb3a60707185c072b910cfa188cf8"}
Nov 25 23:10:49 crc kubenswrapper[5045]: I1125 23:10:49.466627 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-tj5h4" event={"ID":"ca7e1b0d-b147-46cb-9537-6026becd4866","Type":"ContainerStarted","Data":"6d6ce286243c704305c6624af0d73c7b7464cb69d39e1cbfaf08c1630d2d86fe"}
Nov 25 23:10:49 crc kubenswrapper[5045]: I1125 23:10:49.467485 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b" event={"ID":"9f002a66-6d5b-49a1-881d-5ed4deb1a006","Type":"ContainerStarted","Data":"c491d0c35061cf3ab4aea67620cc67171e00d7e50b5388bf782ffd312555830d"}
Nov 25 23:10:52 crc kubenswrapper[5045]: I1125 23:10:52.485978 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b" event={"ID":"9f002a66-6d5b-49a1-881d-5ed4deb1a006","Type":"ContainerStarted","Data":"58c309c51316f471208c079e68db9c0b446e1b1a3b4a344e85534175c7517686"}
Nov 25 23:10:52 crc kubenswrapper[5045]: I1125 23:10:52.486317 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"
Nov 25 23:10:52 crc kubenswrapper[5045]: I1125 23:10:52.490970 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-tj5h4" event={"ID":"ca7e1b0d-b147-46cb-9537-6026becd4866","Type":"ContainerStarted","Data":"3ae2ef1b318f2b0a7878cfe5b321ac18f7e02c8576824d9e6d6e29b3ba23f2b8"}
Nov 25 23:10:52 crc kubenswrapper[5045]: I1125 23:10:52.504401 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b" podStartSLOduration=1.050220962 podStartE2EDuration="4.504383003s" podCreationTimestamp="2025-11-25 23:10:48 +0000 UTC" firstStartedPulling="2025-11-25 23:10:48.670445577 +0000 UTC m=+705.028104689" lastFinishedPulling="2025-11-25 23:10:52.124607618 +0000 UTC m=+708.482266730" observedRunningTime="2025-11-25 23:10:52.501252433 +0000 UTC m=+708.858911545" watchObservedRunningTime="2025-11-25 23:10:52.504383003 +0000 UTC m=+708.862042115"
Nov 25 23:10:52 crc kubenswrapper[5045]: I1125 23:10:52.517676 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-tj5h4" podStartSLOduration=2.229037898 podStartE2EDuration="5.517661549s" podCreationTimestamp="2025-11-25 23:10:47 +0000 UTC" firstStartedPulling="2025-11-25 23:10:48.82988889 +0000 UTC m=+705.187548002" lastFinishedPulling="2025-11-25 23:10:52.118512541 +0000 UTC m=+708.476171653" observedRunningTime="2025-11-25 23:10:52.516327111 +0000 UTC m=+708.873986223" watchObservedRunningTime="2025-11-25 23:10:52.517661549 +0000 UTC m=+708.875320661"
Nov 25 23:10:53 crc kubenswrapper[5045]: I1125 23:10:53.498151 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-8rvmp" event={"ID":"581be194-4f18-4d0b-82fe-da014d72e03d","Type":"ContainerStarted","Data":"ae93450c8cc9ff03d1383c2610b210bb48c28ff7ddd749220f88aecf1fe9ff4f"}
Nov 25 23:10:53 crc kubenswrapper[5045]: I1125 23:10:53.517243 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-8rvmp" podStartSLOduration=2.159691584 podStartE2EDuration="6.517217634s" podCreationTimestamp="2025-11-25 23:10:47 +0000 UTC" firstStartedPulling="2025-11-25 23:10:48.832781244 +0000 UTC m=+705.190440366" lastFinishedPulling="2025-11-25 23:10:53.190307294 +0000 UTC m=+709.547966416" observedRunningTime="2025-11-25 23:10:53.514500125 +0000 UTC m=+709.872159257" watchObservedRunningTime="2025-11-25 23:10:53.517217634 +0000 UTC m=+709.874876776"
Nov 25 23:10:58 crc kubenswrapper[5045]: I1125 23:10:58.358275 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-lpn7b"
Nov 25 23:11:00 crc kubenswrapper[5045]: I1125 23:11:00.541475 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 23:11:00 crc kubenswrapper[5045]: I1125 23:11:00.541585 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 23:11:02 crc kubenswrapper[5045]: I1125 23:11:02.983438 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mrsr4"]
Nov 25 23:11:02 crc kubenswrapper[5045]: I1125 23:11:02.983998 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovn-controller" containerID="cri-o://32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc" gracePeriod=30
Nov 25 23:11:02 crc kubenswrapper[5045]: I1125 23:11:02.984050 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="nbdb" containerID="cri-o://ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed" gracePeriod=30
Nov 25 23:11:02 crc kubenswrapper[5045]: I1125 23:11:02.984118 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kube-rbac-proxy-node" containerID="cri-o://3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4" gracePeriod=30
Nov 25 23:11:02 crc kubenswrapper[5045]: I1125 23:11:02.984111 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef" gracePeriod=30
Nov 25 23:11:02 crc kubenswrapper[5045]: I1125 23:11:02.984172 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovn-acl-logging" containerID="cri-o://e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f" gracePeriod=30
Nov 25 23:11:02 crc kubenswrapper[5045]: I1125 23:11:02.984101 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="northd" containerID="cri-o://52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5" gracePeriod=30
Nov 25 23:11:02 crc kubenswrapper[5045]: I1125 23:11:02.984179 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="sbdb" containerID="cri-o://3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79" gracePeriod=30
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.018095 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller" containerID="cri-o://465de3e412536c5c0b9115d77b543fd35888b34050cc3ab59f7c63fed4418e93" gracePeriod=30
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.563372 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovnkube-controller/3.log"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.566985 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovn-acl-logging/0.log"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568110 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovn-controller/0.log"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568687 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="465de3e412536c5c0b9115d77b543fd35888b34050cc3ab59f7c63fed4418e93" exitCode=0
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568729 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79" exitCode=0
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568741 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed" exitCode=0
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568751 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5" exitCode=0
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568762 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef" exitCode=0
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568787 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"465de3e412536c5c0b9115d77b543fd35888b34050cc3ab59f7c63fed4418e93"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568772 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4" exitCode=0
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568880 5045 scope.go:117] "RemoveContainer" containerID="167e40a0da5c5b83f3f433e560723ae9cd40287f622c4f37daf958148fc9b4d6"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568896 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f" exitCode=143
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568926 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerID="32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc" exitCode=143
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.568857 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.569019 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.569056 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.569084 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.569114 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.569141 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.569167 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.572531 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/2.log"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.573398 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/1.log"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.573802 5045 generic.go:334] "Generic (PLEG): container finished" podID="e971a47d-97d5-4a21-a255-2497b2b3cbbc" containerID="864184c90b5847350d56d3814873c9e37d409583f933093bca4086657a5f1b17" exitCode=2
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.573847 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-ht6dm" event={"ID":"e971a47d-97d5-4a21-a255-2497b2b3cbbc","Type":"ContainerDied","Data":"864184c90b5847350d56d3814873c9e37d409583f933093bca4086657a5f1b17"}
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.574667 5045 scope.go:117] "RemoveContainer" containerID="864184c90b5847350d56d3814873c9e37d409583f933093bca4086657a5f1b17"
Nov 25 23:11:03 crc kubenswrapper[5045]: E1125 23:11:03.575047 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-ht6dm_openshift-multus(e971a47d-97d5-4a21-a255-2497b2b3cbbc)\"" pod="openshift-multus/multus-ht6dm" podUID="e971a47d-97d5-4a21-a255-2497b2b3cbbc"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.664588 5045 scope.go:117] "RemoveContainer" containerID="bfd5532dcf7609f7f39d9a3856d92510f05e1b76dd721f0b7c1eb4443b94f7d3"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.968341 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovn-acl-logging/0.log"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.969157 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovn-controller/0.log"
Nov 25 23:11:03 crc kubenswrapper[5045]: I1125 23:11:03.969628 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041257 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-kzhk6"]
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041495 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kube-rbac-proxy-ovn-metrics"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041513 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kube-rbac-proxy-ovn-metrics"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041524 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041532 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041548 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="sbdb"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041557 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="sbdb"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041569 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="nbdb"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041577 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="nbdb"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041590 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041598 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041608 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kubecfg-setup"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041616 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kubecfg-setup"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041626 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="northd"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041634 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="northd"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041644 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovn-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041652 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovn-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041665 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041673 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041684 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041692 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041701 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovn-acl-logging"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041728 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovn-acl-logging"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041738 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kube-rbac-proxy-node"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041747 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kube-rbac-proxy-node"
Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.041759 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041767 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041872 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="northd"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041882 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="sbdb"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041895 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041908 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041920 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovn-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041934 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kube-rbac-proxy-node"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041944 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="nbdb"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041955 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovn-acl-logging"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041965 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.041973 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="kube-rbac-proxy-ovn-metrics"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.042186 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.042201 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" containerName="ovnkube-controller"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.044100 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113658 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovn-node-metrics-cert\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113728 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-ovn-kubernetes\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113776 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lg5r\" (UniqueName: \"kubernetes.io/projected/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-kube-api-access-8lg5r\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113799 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-node-log\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113826 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-ovn\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113859 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-config\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113884 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-netns\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113907 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-script-lib\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113936 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-kubelet\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113957 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-etc-openvswitch\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.113984 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-systemd-units\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114014 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114035 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-log-socket\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114058 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-env-overrides\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114079 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-bin\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114109 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-netd\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114134 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-var-lib-openvswitch\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114156 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-slash\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114177 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-openvswitch\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114208 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-systemd\") pod \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\" (UID: \"0f81194f-4d48-4be6-9f73-8b34ed6b56cc\") "
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114355 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-ovnkube-config\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114393 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-var-lib-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114426 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2aa8dc57-e9eb-4d43-adde-383259f98889-ovn-node-metrics-cert\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114461 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-ovn\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114499 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114542 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114580 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114584 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114611 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-log-socket" (OuterVolumeSpecName: "log-socket") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.114974 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-slash" (OuterVolumeSpecName: "host-slash") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115030 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115040 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115030 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115089 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-node-log" (OuterVolumeSpecName: "node-log") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115105 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115118 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115132 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115139 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115154 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115117 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115142 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115336 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115444 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-node-log\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115528 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-slash\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115578 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-env-overrides\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115665 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-log-socket\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115774 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-systemd-units\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115820 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-run-ovn-kubernetes\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115877 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-run-netns\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115912 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-ovnkube-script-lib\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115947 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-cni-bin\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.115984 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-cni-netd\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116031 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvltb\" (UniqueName: \"kubernetes.io/projected/2aa8dc57-e9eb-4d43-adde-383259f98889-kube-api-access-xvltb\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116097 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-kubelet\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116140 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-etc-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116178 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116209 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-systemd\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6"
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116291 5045 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116330 5045 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-node-log\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116378 5045 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116398 5045 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116417 5045 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116434 5045 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116451 5045 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-kubelet\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116466 5045 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116478 5045 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-systemd-units\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116492 5045 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116508 5045 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116520 5045 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-log-socket\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116531 5045 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-bin\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116542 5045 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-cni-netd\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116555 5045 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 25 23:11:04 crc kubenswrapper[5045]:
I1125 23:11:04.116568 5045 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-host-slash\") on node \"crc\" DevicePath \"\"" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.116583 5045 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.122296 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-kube-api-access-8lg5r" (OuterVolumeSpecName: "kube-api-access-8lg5r") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "kube-api-access-8lg5r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.122540 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.140435 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "0f81194f-4d48-4be6-9f73-8b34ed6b56cc" (UID: "0f81194f-4d48-4be6-9f73-8b34ed6b56cc"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.217895 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-slash\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.217983 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-env-overrides\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218032 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-log-socket\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218072 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-systemd-units\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218108 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-run-ovn-kubernetes\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218112 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-slash\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218148 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-run-netns\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218182 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-ovnkube-script-lib\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218217 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-cni-bin\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 
23:11:04.218219 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-run-ovn-kubernetes\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218220 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-log-socket\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218276 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-run-netns\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218295 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-cni-netd\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218248 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-cni-netd\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218219 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-systemd-units\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218372 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvltb\" (UniqueName: \"kubernetes.io/projected/2aa8dc57-e9eb-4d43-adde-383259f98889-kube-api-access-xvltb\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218288 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-cni-bin\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218422 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-kubelet\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218450 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-etc-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218479 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-etc-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218522 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218569 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-systemd\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218591 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-kubelet\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218604 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-ovnkube-config\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218669 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218679 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-var-lib-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218761 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-var-lib-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218705 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-systemd\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218863 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2aa8dc57-e9eb-4d43-adde-383259f98889-ovn-node-metrics-cert\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.218923 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-ovn\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219009 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219068 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-ovn\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219126 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-run-openvswitch\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219138 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-ovnkube-script-lib\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219136 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-node-log\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219073 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2aa8dc57-e9eb-4d43-adde-383259f98889-node-log\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219168 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-env-overrides\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219260 5045 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219288 5045 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219311 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lg5r\" (UniqueName: \"kubernetes.io/projected/0f81194f-4d48-4be6-9f73-8b34ed6b56cc-kube-api-access-8lg5r\") on node \"crc\" DevicePath \"\"" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.219486 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2aa8dc57-e9eb-4d43-adde-383259f98889-ovnkube-config\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.222838 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2aa8dc57-e9eb-4d43-adde-383259f98889-ovn-node-metrics-cert\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.244112 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvltb\" (UniqueName: \"kubernetes.io/projected/2aa8dc57-e9eb-4d43-adde-383259f98889-kube-api-access-xvltb\") pod \"ovnkube-node-kzhk6\" (UID: \"2aa8dc57-e9eb-4d43-adde-383259f98889\") " pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.363180 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.582964 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"46ba4d9188a7c65ad76a3f0c64339ed08adc160a9afcde7bc259bde8cb63dbde"} Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.589640 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovn-acl-logging/0.log" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.590294 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mrsr4_0f81194f-4d48-4be6-9f73-8b34ed6b56cc/ovn-controller/0.log" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.590929 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" event={"ID":"0f81194f-4d48-4be6-9f73-8b34ed6b56cc","Type":"ContainerDied","Data":"4e06634f4f6504206e000ccc570179980ad5ad973bbf9854a7530a095b0232a4"} Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.590991 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mrsr4" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.591045 5045 scope.go:117] "RemoveContainer" containerID="465de3e412536c5c0b9115d77b543fd35888b34050cc3ab59f7c63fed4418e93" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.592940 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/2.log" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.627952 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mrsr4"] Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.634897 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mrsr4"] Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.636261 5045 scope.go:117] "RemoveContainer" containerID="3335d195acd8212b04154031470185b4212a3b9ea685bca35b665e4c912aad79" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.694105 5045 scope.go:117] "RemoveContainer" containerID="ec37093820a6ba9c4edd4d86b949aa556f1a7e83ef91ee051aca643e3ce7afed" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.718675 5045 scope.go:117] "RemoveContainer" containerID="52a42bfe5858e4b394e1527c27a96f6e5350c3b772acdd06685482f78ca026a5" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.756401 5045 scope.go:117] "RemoveContainer" containerID="4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.775273 5045 scope.go:117] "RemoveContainer" containerID="32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.780806 5045 scope.go:117] "RemoveContainer" containerID="3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.799520 5045 scope.go:117] "RemoveContainer" containerID="62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.811005 5045 scope.go:117] "RemoveContainer" containerID="e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.823333 5045 scope.go:117] "RemoveContainer" containerID="4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef" Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.824114 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\": container with ID starting with 4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef not found: ID does not exist" containerID="4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef" Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.824182 5045 kuberuntime_gc.go:150] "Failed to remove container" err="failed to get container status \"4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\": rpc error: code = NotFound desc = could not find container \"4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef\": container with ID starting with 4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef not found: ID does not exist" containerID="4f890324083b272c0ba8825250e7e6523cd5584a5b503d39dd4fc40c08cd08ef" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.824225 5045 scope.go:117] "RemoveContainer" 
containerID="e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.830120 5045 scope.go:117] "RemoveContainer" containerID="32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc" Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.830252 5045 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_ovn-acl-logging_ovnkube-node-mrsr4_openshift-ovn-kubernetes_0f81194f-4d48-4be6-9f73-8b34ed6b56cc_0 in pod sandbox 4e06634f4f6504206e000ccc570179980ad5ad973bbf9854a7530a095b0232a4 from index: no such id: 'e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f'" containerID="e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f" Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.830335 5045 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_ovn-acl-logging_ovnkube-node-mrsr4_openshift-ovn-kubernetes_0f81194f-4d48-4be6-9f73-8b34ed6b56cc_0 in pod sandbox 4e06634f4f6504206e000ccc570179980ad5ad973bbf9854a7530a095b0232a4 from index: no such id: 'e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f'" containerID="e8c75d71a98126d2db184fef9a6e947d6f51f184cce2380f35edf9ab65ea731f" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.830373 5045 scope.go:117] "RemoveContainer" containerID="3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4" Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.830492 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\": container with ID starting with 32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc not found: ID does not exist" containerID="32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.830560 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc"} err="failed to get container status \"32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\": rpc error: code = NotFound desc = could not find container \"32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc\": container with ID starting with 32d06c824f504866afc08693b9aaadfdd8b78b819cf8b7cf79fb13bb355498cc not found: ID does not exist" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.830595 5045 scope.go:117] "RemoveContainer" containerID="62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e" Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.830916 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\": container with ID starting with 62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e not found: ID does not exist" containerID="62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e" Nov 25 23:11:04 crc kubenswrapper[5045]: I1125 23:11:04.830962 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e"} err="failed to get container status 
\"62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\": rpc error: code = NotFound desc = could not find container \"62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e\": container with ID starting with 62906f24f07b95b5e74cf78a00c9b8d61c8f6f7caadd055e47d408abc2acbd5e not found: ID does not exist" Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.831278 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\": container with ID starting with 3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4 not found: ID does not exist" containerID="3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4" Nov 25 23:11:04 crc kubenswrapper[5045]: E1125 23:11:04.831325 5045 kuberuntime_gc.go:150] "Failed to remove container" err="failed to get container status \"3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\": rpc error: code = NotFound desc = could not find container \"3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4\": container with ID starting with 3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4 not found: ID does not exist" containerID="3184486f44b2f02b8e06ae9546e9f2db67c1e50f667546551d957d3b9b7bddb4" Nov 25 23:11:05 crc kubenswrapper[5045]: I1125 23:11:05.603034 5045 generic.go:334] "Generic (PLEG): container finished" podID="2aa8dc57-e9eb-4d43-adde-383259f98889" containerID="191377de72a7faa2d657f4614defb51f200528eb030b2a7820373d9ecae2ab60" exitCode=0 Nov 25 23:11:05 crc kubenswrapper[5045]: I1125 23:11:05.603129 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerDied","Data":"191377de72a7faa2d657f4614defb51f200528eb030b2a7820373d9ecae2ab60"} Nov 25 23:11:06 crc kubenswrapper[5045]: I1125 23:11:06.414110 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f81194f-4d48-4be6-9f73-8b34ed6b56cc" path="/var/lib/kubelet/pods/0f81194f-4d48-4be6-9f73-8b34ed6b56cc/volumes" Nov 25 23:11:06 crc kubenswrapper[5045]: I1125 23:11:06.617197 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"ebe498ef5a32869183f441193cb94f6c6521711228c3f99a60053e5b8c57e862"} Nov 25 23:11:06 crc kubenswrapper[5045]: I1125 23:11:06.617263 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"8ecede105618d6ceeb50abf998fd2c02b089b2bc8c1d63746883d5121d4bc179"} Nov 25 23:11:07 crc kubenswrapper[5045]: I1125 23:11:07.633450 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"d9b74572341724a994d52ed5abaa4208aefd51b2e8eda3b15fde6f22d90df830"} Nov 25 23:11:07 crc kubenswrapper[5045]: I1125 23:11:07.633982 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"491deb4cbdb8d0cb9b5b19c51ec0f921f7737d3dbcc868a65602de3f084440da"} Nov 25 23:11:07 crc kubenswrapper[5045]: I1125 23:11:07.634020 5045 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"c0d7e592eec3bac7382301247a3239c64ba79c65f9ccd5745c95a355fce52fbf"} Nov 25 23:11:07 crc kubenswrapper[5045]: I1125 23:11:07.634049 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"aefb2cdbadfb74a9c9dabf5be100fad8cd0f7a3f38058c64ee91ebe1e77135e2"} Nov 25 23:11:10 crc kubenswrapper[5045]: I1125 23:11:10.663339 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"d2641e161b5050d2d6643a909a8177020616c60c6b808b5ac4517227826cce7c"} Nov 25 23:11:12 crc kubenswrapper[5045]: I1125 23:11:12.685040 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" event={"ID":"2aa8dc57-e9eb-4d43-adde-383259f98889","Type":"ContainerStarted","Data":"17bc2d49ec52b0a1648301a9ed212ee7c28efb2a04d7614d17984a4b43b01873"} Nov 25 23:11:13 crc kubenswrapper[5045]: I1125 23:11:13.690070 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:13 crc kubenswrapper[5045]: I1125 23:11:13.690389 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:13 crc kubenswrapper[5045]: I1125 23:11:13.690403 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:13 crc kubenswrapper[5045]: I1125 23:11:13.717770 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:13 crc kubenswrapper[5045]: I1125 23:11:13.728149 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" podStartSLOduration=9.728121414 podStartE2EDuration="9.728121414s" podCreationTimestamp="2025-11-25 23:11:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:11:13.722280677 +0000 UTC m=+730.079939799" watchObservedRunningTime="2025-11-25 23:11:13.728121414 +0000 UTC m=+730.085780556" Nov 25 23:11:13 crc kubenswrapper[5045]: I1125 23:11:13.731336 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:14 crc kubenswrapper[5045]: I1125 23:11:14.401149 5045 scope.go:117] "RemoveContainer" containerID="864184c90b5847350d56d3814873c9e37d409583f933093bca4086657a5f1b17" Nov 25 23:11:14 crc kubenswrapper[5045]: E1125 23:11:14.401594 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-ht6dm_openshift-multus(e971a47d-97d5-4a21-a255-2497b2b3cbbc)\"" pod="openshift-multus/multus-ht6dm" podUID="e971a47d-97d5-4a21-a255-2497b2b3cbbc" Nov 25 23:11:26 crc kubenswrapper[5045]: I1125 23:11:26.397636 5045 scope.go:117] "RemoveContainer" containerID="864184c90b5847350d56d3814873c9e37d409583f933093bca4086657a5f1b17" Nov 25 23:11:27 crc kubenswrapper[5045]: I1125 23:11:27.286820 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/2.log" Nov 25 23:11:28 crc kubenswrapper[5045]: I1125 23:11:28.297865 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-ht6dm_e971a47d-97d5-4a21-a255-2497b2b3cbbc/kube-multus/2.log" Nov 25 23:11:28 crc kubenswrapper[5045]: I1125 23:11:28.298140 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-ht6dm" event={"ID":"e971a47d-97d5-4a21-a255-2497b2b3cbbc","Type":"ContainerStarted","Data":"8d97ab3175601e9fa32ab0e1c23f944b1ce40c762c252afbebcb5e7a32a1a657"} Nov 25 23:11:30 crc kubenswrapper[5045]: I1125 23:11:30.540824 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:11:30 crc kubenswrapper[5045]: I1125 23:11:30.541306 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:11:34 crc kubenswrapper[5045]: I1125 23:11:34.405751 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kzhk6" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.077774 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2"] Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.079359 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.081344 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.086902 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2"] Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.193850 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.193923 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.193994 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcrj8\" (UniqueName: \"kubernetes.io/projected/2fc73dbf-cedf-448c-9c04-bd1db878524a-kube-api-access-lcrj8\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.295230 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.295303 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.295392 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcrj8\" (UniqueName: \"kubernetes.io/projected/2fc73dbf-cedf-448c-9c04-bd1db878524a-kube-api-access-lcrj8\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.296111 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.296269 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.342144 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcrj8\" (UniqueName: \"kubernetes.io/projected/2fc73dbf-cedf-448c-9c04-bd1db878524a-kube-api-access-lcrj8\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.392255 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:41 crc kubenswrapper[5045]: I1125 23:11:41.667090 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2"] Nov 25 23:11:42 crc kubenswrapper[5045]: I1125 23:11:42.401101 5045 generic.go:334] "Generic (PLEG): container finished" podID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerID="432fd0ffdb8749c047e7dea7e79b43ea9ad2a284c715ecab4e25f1d7e11d50c7" exitCode=0 Nov 25 23:11:42 crc kubenswrapper[5045]: I1125 23:11:42.417954 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" event={"ID":"2fc73dbf-cedf-448c-9c04-bd1db878524a","Type":"ContainerDied","Data":"432fd0ffdb8749c047e7dea7e79b43ea9ad2a284c715ecab4e25f1d7e11d50c7"} Nov 25 23:11:42 crc kubenswrapper[5045]: I1125 23:11:42.418008 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" event={"ID":"2fc73dbf-cedf-448c-9c04-bd1db878524a","Type":"ContainerStarted","Data":"7e05e62352c5fb50640f4137e974d0a6fd868bfb097b306ab28bcd80cceb63cd"} Nov 25 23:11:44 crc kubenswrapper[5045]: I1125 23:11:44.417182 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" event={"ID":"2fc73dbf-cedf-448c-9c04-bd1db878524a","Type":"ContainerStarted","Data":"f4b9f3e4ab8c27fe55938cda013c103b1bfbecb263b244092ee582a700452cf6"} Nov 25 23:11:45 crc kubenswrapper[5045]: I1125 23:11:45.427140 5045 generic.go:334] "Generic (PLEG): container finished" podID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerID="f4b9f3e4ab8c27fe55938cda013c103b1bfbecb263b244092ee582a700452cf6" exitCode=0 Nov 25 23:11:45 crc kubenswrapper[5045]: I1125 23:11:45.427203 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" 
event={"ID":"2fc73dbf-cedf-448c-9c04-bd1db878524a","Type":"ContainerDied","Data":"f4b9f3e4ab8c27fe55938cda013c103b1bfbecb263b244092ee582a700452cf6"} Nov 25 23:11:46 crc kubenswrapper[5045]: I1125 23:11:46.435371 5045 generic.go:334] "Generic (PLEG): container finished" podID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerID="19148dca3ff25d37899ea4994a90f453b5601c7dedd56613f6b6b44fd0bd094e" exitCode=0 Nov 25 23:11:46 crc kubenswrapper[5045]: I1125 23:11:46.435450 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" event={"ID":"2fc73dbf-cedf-448c-9c04-bd1db878524a","Type":"ContainerDied","Data":"19148dca3ff25d37899ea4994a90f453b5601c7dedd56613f6b6b44fd0bd094e"} Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.772525 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.903291 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcrj8\" (UniqueName: \"kubernetes.io/projected/2fc73dbf-cedf-448c-9c04-bd1db878524a-kube-api-access-lcrj8\") pod \"2fc73dbf-cedf-448c-9c04-bd1db878524a\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.903374 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-util\") pod \"2fc73dbf-cedf-448c-9c04-bd1db878524a\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.903456 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-bundle\") pod \"2fc73dbf-cedf-448c-9c04-bd1db878524a\" (UID: \"2fc73dbf-cedf-448c-9c04-bd1db878524a\") " Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.904908 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-bundle" (OuterVolumeSpecName: "bundle") pod "2fc73dbf-cedf-448c-9c04-bd1db878524a" (UID: "2fc73dbf-cedf-448c-9c04-bd1db878524a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.912661 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fc73dbf-cedf-448c-9c04-bd1db878524a-kube-api-access-lcrj8" (OuterVolumeSpecName: "kube-api-access-lcrj8") pod "2fc73dbf-cedf-448c-9c04-bd1db878524a" (UID: "2fc73dbf-cedf-448c-9c04-bd1db878524a"). InnerVolumeSpecName "kube-api-access-lcrj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.924824 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-util" (OuterVolumeSpecName: "util") pod "2fc73dbf-cedf-448c-9c04-bd1db878524a" (UID: "2fc73dbf-cedf-448c-9c04-bd1db878524a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.978557 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c529q"] Nov 25 23:11:47 crc kubenswrapper[5045]: E1125 23:11:47.979019 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerName="util" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.979065 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerName="util" Nov 25 23:11:47 crc kubenswrapper[5045]: E1125 23:11:47.979100 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerName="extract" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.979117 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerName="extract" Nov 25 23:11:47 crc kubenswrapper[5045]: E1125 23:11:47.979139 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerName="pull" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.979158 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerName="pull" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.979390 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fc73dbf-cedf-448c-9c04-bd1db878524a" containerName="extract" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.981212 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:47 crc kubenswrapper[5045]: I1125 23:11:47.993194 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c529q"] Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.005632 5045 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.005680 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcrj8\" (UniqueName: \"kubernetes.io/projected/2fc73dbf-cedf-448c-9c04-bd1db878524a-kube-api-access-lcrj8\") on node \"crc\" DevicePath \"\"" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.005708 5045 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fc73dbf-cedf-448c-9c04-bd1db878524a-util\") on node \"crc\" DevicePath \"\"" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.106562 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnb4q\" (UniqueName: \"kubernetes.io/projected/091d80d3-9ffe-4888-a892-b2b0726b0ceb-kube-api-access-jnb4q\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.107201 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-utilities\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: 
I1125 23:11:48.107472 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-catalog-content\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.209257 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-utilities\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.209667 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-catalog-content\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.209910 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-utilities\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.210159 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnb4q\" (UniqueName: \"kubernetes.io/projected/091d80d3-9ffe-4888-a892-b2b0726b0ceb-kube-api-access-jnb4q\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.210375 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-catalog-content\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.242390 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnb4q\" (UniqueName: \"kubernetes.io/projected/091d80d3-9ffe-4888-a892-b2b0726b0ceb-kube-api-access-jnb4q\") pod \"redhat-operators-c529q\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.310359 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.450503 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" event={"ID":"2fc73dbf-cedf-448c-9c04-bd1db878524a","Type":"ContainerDied","Data":"7e05e62352c5fb50640f4137e974d0a6fd868bfb097b306ab28bcd80cceb63cd"} Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.450630 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e05e62352c5fb50640f4137e974d0a6fd868bfb097b306ab28bcd80cceb63cd" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.450591 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2" Nov 25 23:11:48 crc kubenswrapper[5045]: I1125 23:11:48.559602 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c529q"] Nov 25 23:11:49 crc kubenswrapper[5045]: I1125 23:11:49.460420 5045 generic.go:334] "Generic (PLEG): container finished" podID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerID="a2019bef214b8b4b12d25145405570bbff2d73c722654f2244f6a70bfa456a06" exitCode=0 Nov 25 23:11:49 crc kubenswrapper[5045]: I1125 23:11:49.460493 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c529q" event={"ID":"091d80d3-9ffe-4888-a892-b2b0726b0ceb","Type":"ContainerDied","Data":"a2019bef214b8b4b12d25145405570bbff2d73c722654f2244f6a70bfa456a06"} Nov 25 23:11:49 crc kubenswrapper[5045]: I1125 23:11:49.460535 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c529q" event={"ID":"091d80d3-9ffe-4888-a892-b2b0726b0ceb","Type":"ContainerStarted","Data":"786659f37070409652584ffc4b894f1062fdd6d91d558a08c8afcd62b843f626"} Nov 25 23:11:50 crc kubenswrapper[5045]: I1125 23:11:50.031827 5045 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 23:11:50 crc kubenswrapper[5045]: I1125 23:11:50.467893 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c529q" event={"ID":"091d80d3-9ffe-4888-a892-b2b0726b0ceb","Type":"ContainerStarted","Data":"b4b3e201da997bedb72a2b72d7b50db47027137da7ea54a040ae1017b9da82eb"} Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.474151 5045 generic.go:334] "Generic (PLEG): container finished" podID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerID="b4b3e201da997bedb72a2b72d7b50db47027137da7ea54a040ae1017b9da82eb" exitCode=0 Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.474188 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c529q" event={"ID":"091d80d3-9ffe-4888-a892-b2b0726b0ceb","Type":"ContainerDied","Data":"b4b3e201da997bedb72a2b72d7b50db47027137da7ea54a040ae1017b9da82eb"} Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.609990 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cn5kf"] Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.610873 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-cn5kf" Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.612579 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.612615 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-hwpq8" Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.613142 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.619174 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cn5kf"] Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.751152 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74qfz\" (UniqueName: \"kubernetes.io/projected/64863e5b-fa79-4a6f-af83-631dafa8a1c4-kube-api-access-74qfz\") pod \"nmstate-operator-557fdffb88-cn5kf\" (UID: \"64863e5b-fa79-4a6f-af83-631dafa8a1c4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cn5kf" Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.852648 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74qfz\" (UniqueName: \"kubernetes.io/projected/64863e5b-fa79-4a6f-af83-631dafa8a1c4-kube-api-access-74qfz\") pod \"nmstate-operator-557fdffb88-cn5kf\" (UID: \"64863e5b-fa79-4a6f-af83-631dafa8a1c4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cn5kf" Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.879520 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74qfz\" (UniqueName: \"kubernetes.io/projected/64863e5b-fa79-4a6f-af83-631dafa8a1c4-kube-api-access-74qfz\") pod \"nmstate-operator-557fdffb88-cn5kf\" (UID: \"64863e5b-fa79-4a6f-af83-631dafa8a1c4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cn5kf" Nov 25 23:11:51 crc kubenswrapper[5045]: I1125 23:11:51.923224 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-cn5kf" Nov 25 23:11:52 crc kubenswrapper[5045]: I1125 23:11:52.205971 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cn5kf"] Nov 25 23:11:52 crc kubenswrapper[5045]: I1125 23:11:52.484250 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c529q" event={"ID":"091d80d3-9ffe-4888-a892-b2b0726b0ceb","Type":"ContainerStarted","Data":"ff68b3646dbc40df8caeaa6b166bc0322681965de3b0b42d08d74dd5f13c849a"} Nov 25 23:11:52 crc kubenswrapper[5045]: I1125 23:11:52.485512 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-cn5kf" event={"ID":"64863e5b-fa79-4a6f-af83-631dafa8a1c4","Type":"ContainerStarted","Data":"c1049fd40df66904f548aa7debe289abe83abca28984ee409476836360fac2bc"} Nov 25 23:11:52 crc kubenswrapper[5045]: I1125 23:11:52.515537 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c529q" podStartSLOduration=3.088951873 podStartE2EDuration="5.515510563s" podCreationTimestamp="2025-11-25 23:11:47 +0000 UTC" firstStartedPulling="2025-11-25 23:11:49.464085444 +0000 UTC m=+765.821744606" lastFinishedPulling="2025-11-25 23:11:51.890644174 +0000 UTC m=+768.248303296" observedRunningTime="2025-11-25 23:11:52.512226009 +0000 UTC m=+768.869885121" watchObservedRunningTime="2025-11-25 23:11:52.515510563 +0000 UTC m=+768.873169715" Nov 25 23:11:54 crc kubenswrapper[5045]: I1125 23:11:54.503487 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-cn5kf" event={"ID":"64863e5b-fa79-4a6f-af83-631dafa8a1c4","Type":"ContainerStarted","Data":"1ebcd4ceeaf6f93bf789d9d2e63cce9ebc74f1c667a37a9ca4bd0f06b2fc3e26"} Nov 25 23:11:54 crc kubenswrapper[5045]: I1125 23:11:54.517627 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-cn5kf" podStartSLOduration=1.440609129 podStartE2EDuration="3.517614027s" podCreationTimestamp="2025-11-25 23:11:51 +0000 UTC" firstStartedPulling="2025-11-25 23:11:52.22324079 +0000 UTC m=+768.580899902" lastFinishedPulling="2025-11-25 23:11:54.300245698 +0000 UTC m=+770.657904800" observedRunningTime="2025-11-25 23:11:54.516077583 +0000 UTC m=+770.873736715" watchObservedRunningTime="2025-11-25 23:11:54.517614027 +0000 UTC m=+770.875273139" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.310941 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.311295 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.362137 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.577681 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-m8t7m"] Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.583649 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.597701 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8t7m"] Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.611185 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.637373 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5942r\" (UniqueName: \"kubernetes.io/projected/19cf4a9f-7471-453d-87e0-83adf05881b8-kube-api-access-5942r\") pod \"redhat-marketplace-m8t7m\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.637425 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-utilities\") pod \"redhat-marketplace-m8t7m\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.637759 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-catalog-content\") pod \"redhat-marketplace-m8t7m\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.739121 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5942r\" (UniqueName: \"kubernetes.io/projected/19cf4a9f-7471-453d-87e0-83adf05881b8-kube-api-access-5942r\") pod \"redhat-marketplace-m8t7m\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.739161 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-utilities\") pod \"redhat-marketplace-m8t7m\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.739222 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-catalog-content\") pod \"redhat-marketplace-m8t7m\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.739665 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-catalog-content\") pod \"redhat-marketplace-m8t7m\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.740096 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-utilities\") pod \"redhat-marketplace-m8t7m\" (UID: 
\"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.761651 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5942r\" (UniqueName: \"kubernetes.io/projected/19cf4a9f-7471-453d-87e0-83adf05881b8-kube-api-access-5942r\") pod \"redhat-marketplace-m8t7m\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:58 crc kubenswrapper[5045]: I1125 23:11:58.929236 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:11:59 crc kubenswrapper[5045]: I1125 23:11:59.397929 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8t7m"] Nov 25 23:11:59 crc kubenswrapper[5045]: W1125 23:11:59.406337 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19cf4a9f_7471_453d_87e0_83adf05881b8.slice/crio-6e80c4c1cc6440c2462aaf5fc1d7914f47f1e736a89b6fe43965fe4987679413 WatchSource:0}: Error finding container 6e80c4c1cc6440c2462aaf5fc1d7914f47f1e736a89b6fe43965fe4987679413: Status 404 returned error can't find the container with id 6e80c4c1cc6440c2462aaf5fc1d7914f47f1e736a89b6fe43965fe4987679413 Nov 25 23:11:59 crc kubenswrapper[5045]: I1125 23:11:59.538074 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8t7m" event={"ID":"19cf4a9f-7471-453d-87e0-83adf05881b8","Type":"ContainerStarted","Data":"6e80c4c1cc6440c2462aaf5fc1d7914f47f1e736a89b6fe43965fe4987679413"} Nov 25 23:12:00 crc kubenswrapper[5045]: I1125 23:12:00.540962 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:12:00 crc kubenswrapper[5045]: I1125 23:12:00.541752 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:12:00 crc kubenswrapper[5045]: I1125 23:12:00.541882 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:12:00 crc kubenswrapper[5045]: I1125 23:12:00.543068 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b92b5d9b7e6348e5c2d32780968d464c8df43db35693b94adce9b088fa04e458"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:12:00 crc kubenswrapper[5045]: I1125 23:12:00.543168 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://b92b5d9b7e6348e5c2d32780968d464c8df43db35693b94adce9b088fa04e458" gracePeriod=600 Nov 25 23:12:00 crc kubenswrapper[5045]: I1125 
Nov 25 23:12:00 crc kubenswrapper[5045]: I1125 23:12:00.549237 5045 generic.go:334] "Generic (PLEG): container finished" podID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerID="5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7" exitCode=0
Nov 25 23:12:00 crc kubenswrapper[5045]: I1125 23:12:00.549318 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8t7m" event={"ID":"19cf4a9f-7471-453d-87e0-83adf05881b8","Type":"ContainerDied","Data":"5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7"}
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.559286 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="b92b5d9b7e6348e5c2d32780968d464c8df43db35693b94adce9b088fa04e458" exitCode=0
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.559376 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"b92b5d9b7e6348e5c2d32780968d464c8df43db35693b94adce9b088fa04e458"}
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.559993 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"ac9b25e6d63635c2d6571dedc7552214abf9eda1f3832a75a6240c6a0f672f51"}
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.560020 5045 scope.go:117] "RemoveContainer" containerID="cc90319f958b5f9648cd5e926ff20552a29d869f3d08dacc8d4b7c3206f610aa"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.563299 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8t7m" event={"ID":"19cf4a9f-7471-453d-87e0-83adf05881b8","Type":"ContainerStarted","Data":"7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605"}
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.585983 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.587025 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.590463 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-cvj2q"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.611652 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.616147 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.617434 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.619108 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.635799 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-gmpgg"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.636665 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.643486 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.688725 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5lx8\" (UniqueName: \"kubernetes.io/projected/10586198-1de3-4da4-9ba1-b79a9785da2f-kube-api-access-s5lx8\") pod \"nmstate-metrics-5dcf9c57c5-2fvh8\" (UID: \"10586198-1de3-4da4-9ba1-b79a9785da2f\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.688786 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj6gl\" (UniqueName: \"kubernetes.io/projected/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-kube-api-access-wj6gl\") pod \"nmstate-webhook-6b89b748d8-6vs48\" (UID: \"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.688897 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-6vs48\" (UID: \"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.715324 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.716261 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.717734 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.717923 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-zmpxz"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.720925 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.727698 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790032 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6d561240-e484-4a96-aff1-aef1a5c56daf-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790077 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-6vs48\" (UID: \"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790099 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6d561240-e484-4a96-aff1-aef1a5c56daf-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790179 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5lx8\" (UniqueName: \"kubernetes.io/projected/10586198-1de3-4da4-9ba1-b79a9785da2f-kube-api-access-s5lx8\") pod \"nmstate-metrics-5dcf9c57c5-2fvh8\" (UID: \"10586198-1de3-4da4-9ba1-b79a9785da2f\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790209 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-ovs-socket\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: E1125 23:12:01.790215 5045 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790228 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89pfl\" (UniqueName: \"kubernetes.io/projected/6d561240-e484-4a96-aff1-aef1a5c56daf-kube-api-access-89pfl\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790246 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj6gl\" (UniqueName: \"kubernetes.io/projected/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-kube-api-access-wj6gl\") pod \"nmstate-webhook-6b89b748d8-6vs48\" (UID: \"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"
Nov 25 23:12:01 crc kubenswrapper[5045]: E1125 23:12:01.790263 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-tls-key-pair podName:8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73 nodeName:}" failed. No retries permitted until 2025-11-25 23:12:02.290248337 +0000 UTC m=+778.647907449 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-tls-key-pair") pod "nmstate-webhook-6b89b748d8-6vs48" (UID: "8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73") : secret "openshift-nmstate-webhook" not found
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790279 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-nmstate-lock\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790303 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-dbus-socket\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.790321 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49vnh\" (UniqueName: \"kubernetes.io/projected/523926ce-7459-44b3-bc6b-03782619bc1e-kube-api-access-49vnh\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.807953 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wj6gl\" (UniqueName: \"kubernetes.io/projected/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-kube-api-access-wj6gl\") pod \"nmstate-webhook-6b89b748d8-6vs48\" (UID: \"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.813449 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5lx8\" (UniqueName: \"kubernetes.io/projected/10586198-1de3-4da4-9ba1-b79a9785da2f-kube-api-access-s5lx8\") pod \"nmstate-metrics-5dcf9c57c5-2fvh8\" (UID: \"10586198-1de3-4da4-9ba1-b79a9785da2f\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891592 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-ovs-socket\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891645 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89pfl\" (UniqueName: \"kubernetes.io/projected/6d561240-e484-4a96-aff1-aef1a5c56daf-kube-api-access-89pfl\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891695 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-nmstate-lock\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891750 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-dbus-socket\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891783 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49vnh\" (UniqueName: \"kubernetes.io/projected/523926ce-7459-44b3-bc6b-03782619bc1e-kube-api-access-49vnh\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891804 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-nmstate-lock\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891819 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6d561240-e484-4a96-aff1-aef1a5c56daf-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891869 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6d561240-e484-4a96-aff1-aef1a5c56daf-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.891779 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-ovs-socket\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: E1125 23:12:01.892031 5045 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found
Nov 25 23:12:01 crc kubenswrapper[5045]: E1125 23:12:01.892066 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d561240-e484-4a96-aff1-aef1a5c56daf-plugin-serving-cert podName:6d561240-e484-4a96-aff1-aef1a5c56daf nodeName:}" failed. No retries permitted until 2025-11-25 23:12:02.392053438 +0000 UTC m=+778.749712550 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/6d561240-e484-4a96-aff1-aef1a5c56daf-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-srctj" (UID: "6d561240-e484-4a96-aff1-aef1a5c56daf") : secret "plugin-serving-cert" not found
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.892083 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/523926ce-7459-44b3-bc6b-03782619bc1e-dbus-socket\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.892291 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6b957d5468-w5qn6"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.892871 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6d561240-e484-4a96-aff1-aef1a5c56daf-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.893063 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b957d5468-w5qn6"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.902492 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b957d5468-w5qn6"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.902596 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.916444 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89pfl\" (UniqueName: \"kubernetes.io/projected/6d561240-e484-4a96-aff1-aef1a5c56daf-kube-api-access-89pfl\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.932446 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49vnh\" (UniqueName: \"kubernetes.io/projected/523926ce-7459-44b3-bc6b-03782619bc1e-kube-api-access-49vnh\") pod \"nmstate-handler-gmpgg\" (UID: \"523926ce-7459-44b3-bc6b-03782619bc1e\") " pod="openshift-nmstate/nmstate-handler-gmpgg"
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.954692 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c529q"]
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.954895 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c529q" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerName="registry-server" containerID="cri-o://ff68b3646dbc40df8caeaa6b166bc0322681965de3b0b42d08d74dd5f13c849a" gracePeriod=2
Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.957661 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-gmpgg"
Need to start a new one" pod="openshift-nmstate/nmstate-handler-gmpgg" Nov 25 23:12:01 crc kubenswrapper[5045]: W1125 23:12:01.977786 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod523926ce_7459_44b3_bc6b_03782619bc1e.slice/crio-45949a585da63b665ccffba88d65efa90bced721dc07d591cf15feed2514ab3f WatchSource:0}: Error finding container 45949a585da63b665ccffba88d65efa90bced721dc07d591cf15feed2514ab3f: Status 404 returned error can't find the container with id 45949a585da63b665ccffba88d65efa90bced721dc07d591cf15feed2514ab3f Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.996516 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-console-config\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.996812 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-service-ca\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.996867 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97ql7\" (UniqueName: \"kubernetes.io/projected/5ead950a-5b22-47a9-a237-508b49269c01-kube-api-access-97ql7\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.996927 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5ead950a-5b22-47a9-a237-508b49269c01-console-oauth-config\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.996957 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-oauth-serving-cert\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.996994 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5ead950a-5b22-47a9-a237-508b49269c01-console-serving-cert\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:01 crc kubenswrapper[5045]: I1125 23:12:01.997033 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-trusted-ca-bundle\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 
23:12:02.097912 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97ql7\" (UniqueName: \"kubernetes.io/projected/5ead950a-5b22-47a9-a237-508b49269c01-kube-api-access-97ql7\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.097992 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5ead950a-5b22-47a9-a237-508b49269c01-console-oauth-config\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.098023 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-oauth-serving-cert\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.098068 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5ead950a-5b22-47a9-a237-508b49269c01-console-serving-cert\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.098112 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-trusted-ca-bundle\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.098154 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-service-ca\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.098179 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-console-config\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.099582 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-service-ca\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.103088 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-oauth-serving-cert\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.103890 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-console-config\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.104508 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ead950a-5b22-47a9-a237-508b49269c01-trusted-ca-bundle\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.115414 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5ead950a-5b22-47a9-a237-508b49269c01-console-oauth-config\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.116584 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97ql7\" (UniqueName: \"kubernetes.io/projected/5ead950a-5b22-47a9-a237-508b49269c01-kube-api-access-97ql7\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.117357 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5ead950a-5b22-47a9-a237-508b49269c01-console-serving-cert\") pod \"console-6b957d5468-w5qn6\" (UID: \"5ead950a-5b22-47a9-a237-508b49269c01\") " pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.215987 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.302146 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-6vs48\" (UID: \"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.313998 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8"] Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.315348 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-6vs48\" (UID: \"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48" Nov 25 23:12:02 crc kubenswrapper[5045]: W1125 23:12:02.359029 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10586198_1de3_4da4_9ba1_b79a9785da2f.slice/crio-b86a454e799eb3237d1bd1f7fda4180da34edc274a11a3e872ba734dfd423e5f WatchSource:0}: Error finding container b86a454e799eb3237d1bd1f7fda4180da34edc274a11a3e872ba734dfd423e5f: Status 404 returned error can't find the container with id b86a454e799eb3237d1bd1f7fda4180da34edc274a11a3e872ba734dfd423e5f Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.402835 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6d561240-e484-4a96-aff1-aef1a5c56daf-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.407153 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6d561240-e484-4a96-aff1-aef1a5c56daf-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-srctj\" (UID: \"6d561240-e484-4a96-aff1-aef1a5c56daf\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.471826 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b957d5468-w5qn6"] Nov 25 23:12:02 crc kubenswrapper[5045]: W1125 23:12:02.475980 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ead950a_5b22_47a9_a237_508b49269c01.slice/crio-0d98cf8384097c26f05e71eb44f75d48035f1cba564a57fe87376e206cd0fc71 WatchSource:0}: Error finding container 0d98cf8384097c26f05e71eb44f75d48035f1cba564a57fe87376e206cd0fc71: Status 404 returned error can't find the container with id 0d98cf8384097c26f05e71eb44f75d48035f1cba564a57fe87376e206cd0fc71 Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.541034 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.569625 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8" event={"ID":"10586198-1de3-4da4-9ba1-b79a9785da2f","Type":"ContainerStarted","Data":"b86a454e799eb3237d1bd1f7fda4180da34edc274a11a3e872ba734dfd423e5f"} Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.571919 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b957d5468-w5qn6" event={"ID":"5ead950a-5b22-47a9-a237-508b49269c01","Type":"ContainerStarted","Data":"0d98cf8384097c26f05e71eb44f75d48035f1cba564a57fe87376e206cd0fc71"} Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.574416 5045 generic.go:334] "Generic (PLEG): container finished" podID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerID="7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605" exitCode=0 Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.574510 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8t7m" event={"ID":"19cf4a9f-7471-453d-87e0-83adf05881b8","Type":"ContainerDied","Data":"7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605"} Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.580469 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-gmpgg" event={"ID":"523926ce-7459-44b3-bc6b-03782619bc1e","Type":"ContainerStarted","Data":"45949a585da63b665ccffba88d65efa90bced721dc07d591cf15feed2514ab3f"} Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.666523 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj" Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.771578 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48"] Nov 25 23:12:02 crc kubenswrapper[5045]: W1125 23:12:02.777000 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d0f4d8f_2c1d_4bdf_9aa0_2971cf03df73.slice/crio-994a548bd873575dea6c469ac731fff674056b2aa13f7e771d9a89b05b9a10c8 WatchSource:0}: Error finding container 994a548bd873575dea6c469ac731fff674056b2aa13f7e771d9a89b05b9a10c8: Status 404 returned error can't find the container with id 994a548bd873575dea6c469ac731fff674056b2aa13f7e771d9a89b05b9a10c8 Nov 25 23:12:02 crc kubenswrapper[5045]: I1125 23:12:02.850371 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj"] Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.587485 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48" event={"ID":"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73","Type":"ContainerStarted","Data":"994a548bd873575dea6c469ac731fff674056b2aa13f7e771d9a89b05b9a10c8"} Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.590991 5045 generic.go:334] "Generic (PLEG): container finished" podID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerID="ff68b3646dbc40df8caeaa6b166bc0322681965de3b0b42d08d74dd5f13c849a" exitCode=0 Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.591055 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c529q" 
event={"ID":"091d80d3-9ffe-4888-a892-b2b0726b0ceb","Type":"ContainerDied","Data":"ff68b3646dbc40df8caeaa6b166bc0322681965de3b0b42d08d74dd5f13c849a"} Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.592376 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj" event={"ID":"6d561240-e484-4a96-aff1-aef1a5c56daf","Type":"ContainerStarted","Data":"fe953915d06cb22921f1b5d9a1fc1e9e1e03580f08c14ddc5e62401ef582926d"} Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.594270 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8t7m" event={"ID":"19cf4a9f-7471-453d-87e0-83adf05881b8","Type":"ContainerStarted","Data":"eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644"} Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.603546 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b957d5468-w5qn6" event={"ID":"5ead950a-5b22-47a9-a237-508b49269c01","Type":"ContainerStarted","Data":"0e9505e57de02b135196e38edb25df8bff5b7c62ddb23ad833528178f7580a11"} Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.631559 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-m8t7m" podStartSLOduration=3.092505742 podStartE2EDuration="5.631543677s" podCreationTimestamp="2025-11-25 23:11:58 +0000 UTC" firstStartedPulling="2025-11-25 23:12:00.551886122 +0000 UTC m=+776.909545234" lastFinishedPulling="2025-11-25 23:12:03.090924007 +0000 UTC m=+779.448583169" observedRunningTime="2025-11-25 23:12:03.630445515 +0000 UTC m=+779.988104627" watchObservedRunningTime="2025-11-25 23:12:03.631543677 +0000 UTC m=+779.989202789" Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.648980 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6b957d5468-w5qn6" podStartSLOduration=2.648962192 podStartE2EDuration="2.648962192s" podCreationTimestamp="2025-11-25 23:12:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:12:03.647297873 +0000 UTC m=+780.004956985" watchObservedRunningTime="2025-11-25 23:12:03.648962192 +0000 UTC m=+780.006621314" Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.756906 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.826892 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnb4q\" (UniqueName: \"kubernetes.io/projected/091d80d3-9ffe-4888-a892-b2b0726b0ceb-kube-api-access-jnb4q\") pod \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.826996 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-utilities\") pod \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.827031 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-catalog-content\") pod \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\" (UID: \"091d80d3-9ffe-4888-a892-b2b0726b0ceb\") " Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.827659 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-utilities" (OuterVolumeSpecName: "utilities") pod "091d80d3-9ffe-4888-a892-b2b0726b0ceb" (UID: "091d80d3-9ffe-4888-a892-b2b0726b0ceb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.832544 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/091d80d3-9ffe-4888-a892-b2b0726b0ceb-kube-api-access-jnb4q" (OuterVolumeSpecName: "kube-api-access-jnb4q") pod "091d80d3-9ffe-4888-a892-b2b0726b0ceb" (UID: "091d80d3-9ffe-4888-a892-b2b0726b0ceb"). InnerVolumeSpecName "kube-api-access-jnb4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.915921 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "091d80d3-9ffe-4888-a892-b2b0726b0ceb" (UID: "091d80d3-9ffe-4888-a892-b2b0726b0ceb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.928267 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.928297 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/091d80d3-9ffe-4888-a892-b2b0726b0ceb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:03 crc kubenswrapper[5045]: I1125 23:12:03.928309 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnb4q\" (UniqueName: \"kubernetes.io/projected/091d80d3-9ffe-4888-a892-b2b0726b0ceb-kube-api-access-jnb4q\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:04 crc kubenswrapper[5045]: I1125 23:12:04.617906 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c529q" event={"ID":"091d80d3-9ffe-4888-a892-b2b0726b0ceb","Type":"ContainerDied","Data":"786659f37070409652584ffc4b894f1062fdd6d91d558a08c8afcd62b843f626"} Nov 25 23:12:04 crc kubenswrapper[5045]: I1125 23:12:04.618192 5045 scope.go:117] "RemoveContainer" containerID="ff68b3646dbc40df8caeaa6b166bc0322681965de3b0b42d08d74dd5f13c849a" Nov 25 23:12:04 crc kubenswrapper[5045]: I1125 23:12:04.617920 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c529q" Nov 25 23:12:04 crc kubenswrapper[5045]: I1125 23:12:04.640352 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c529q"] Nov 25 23:12:04 crc kubenswrapper[5045]: I1125 23:12:04.648701 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c529q"] Nov 25 23:12:05 crc kubenswrapper[5045]: I1125 23:12:05.339980 5045 scope.go:117] "RemoveContainer" containerID="b4b3e201da997bedb72a2b72d7b50db47027137da7ea54a040ae1017b9da82eb" Nov 25 23:12:05 crc kubenswrapper[5045]: I1125 23:12:05.417404 5045 scope.go:117] "RemoveContainer" containerID="a2019bef214b8b4b12d25145405570bbff2d73c722654f2244f6a70bfa456a06" Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.417603 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" path="/var/lib/kubelet/pods/091d80d3-9ffe-4888-a892-b2b0726b0ceb/volumes" Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.643233 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj" event={"ID":"6d561240-e484-4a96-aff1-aef1a5c56daf","Type":"ContainerStarted","Data":"0e025b398a4f437638d01dfb7b05c6dcde6a216d54171abd255a5477d38656a3"} Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.646976 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-gmpgg" event={"ID":"523926ce-7459-44b3-bc6b-03782619bc1e","Type":"ContainerStarted","Data":"941931c36f0bcb5db0be7a17487c9f6e5cadfcbad2b9d35878ad7d9c830973bc"} Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.647097 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-gmpgg" Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.648742 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48" 
event={"ID":"8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73","Type":"ContainerStarted","Data":"85a2036df6d4523d7dba2752e706b8226e75a49f77a2b6fddd526ca931d28245"} Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.648825 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48" Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.650898 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8" event={"ID":"10586198-1de3-4da4-9ba1-b79a9785da2f","Type":"ContainerStarted","Data":"e3083aebc8be0477d5389f1be65eb3c5bd9eda7c63da0b60a1d4db0ba18ba000"} Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.668052 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-srctj" podStartSLOduration=3.063811435 podStartE2EDuration="5.668037517s" podCreationTimestamp="2025-11-25 23:12:01 +0000 UTC" firstStartedPulling="2025-11-25 23:12:02.85919242 +0000 UTC m=+779.216851532" lastFinishedPulling="2025-11-25 23:12:05.463418462 +0000 UTC m=+781.821077614" observedRunningTime="2025-11-25 23:12:06.662765085 +0000 UTC m=+783.020424237" watchObservedRunningTime="2025-11-25 23:12:06.668037517 +0000 UTC m=+783.025696639" Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.687029 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-gmpgg" podStartSLOduration=2.197694801 podStartE2EDuration="5.687011897s" podCreationTimestamp="2025-11-25 23:12:01 +0000 UTC" firstStartedPulling="2025-11-25 23:12:01.979651917 +0000 UTC m=+778.337311029" lastFinishedPulling="2025-11-25 23:12:05.468969003 +0000 UTC m=+781.826628125" observedRunningTime="2025-11-25 23:12:06.682041233 +0000 UTC m=+783.039700435" watchObservedRunningTime="2025-11-25 23:12:06.687011897 +0000 UTC m=+783.044671019" Nov 25 23:12:06 crc kubenswrapper[5045]: I1125 23:12:06.710197 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48" podStartSLOduration=3.005373232 podStartE2EDuration="5.710181139s" podCreationTimestamp="2025-11-25 23:12:01 +0000 UTC" firstStartedPulling="2025-11-25 23:12:02.778760039 +0000 UTC m=+779.136419151" lastFinishedPulling="2025-11-25 23:12:05.483567926 +0000 UTC m=+781.841227058" observedRunningTime="2025-11-25 23:12:06.703634539 +0000 UTC m=+783.061293661" watchObservedRunningTime="2025-11-25 23:12:06.710181139 +0000 UTC m=+783.067840261" Nov 25 23:12:08 crc kubenswrapper[5045]: I1125 23:12:08.665615 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8" event={"ID":"10586198-1de3-4da4-9ba1-b79a9785da2f","Type":"ContainerStarted","Data":"003bd47fbd9a225a1688915274f4523ab9ade96be46e13376cc5711a6d6f04de"} Nov 25 23:12:08 crc kubenswrapper[5045]: I1125 23:12:08.704106 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2fvh8" podStartSLOduration=2.210594295 podStartE2EDuration="7.704089681s" podCreationTimestamp="2025-11-25 23:12:01 +0000 UTC" firstStartedPulling="2025-11-25 23:12:02.363675108 +0000 UTC m=+778.721334220" lastFinishedPulling="2025-11-25 23:12:07.857170494 +0000 UTC m=+784.214829606" observedRunningTime="2025-11-25 23:12:08.701406533 +0000 UTC m=+785.059065715" watchObservedRunningTime="2025-11-25 23:12:08.704089681 +0000 UTC m=+785.061748793" Nov 25 23:12:08 crc 
kubenswrapper[5045]: I1125 23:12:08.930396 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:12:08 crc kubenswrapper[5045]: I1125 23:12:08.930474 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:12:08 crc kubenswrapper[5045]: I1125 23:12:08.998128 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:12:09 crc kubenswrapper[5045]: I1125 23:12:09.745546 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:12:09 crc kubenswrapper[5045]: I1125 23:12:09.814736 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8t7m"] Nov 25 23:12:11 crc kubenswrapper[5045]: I1125 23:12:11.688871 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-m8t7m" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerName="registry-server" containerID="cri-o://eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644" gracePeriod=2 Nov 25 23:12:11 crc kubenswrapper[5045]: I1125 23:12:11.994911 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-gmpgg" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.082007 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.142504 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5942r\" (UniqueName: \"kubernetes.io/projected/19cf4a9f-7471-453d-87e0-83adf05881b8-kube-api-access-5942r\") pod \"19cf4a9f-7471-453d-87e0-83adf05881b8\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.142558 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-utilities\") pod \"19cf4a9f-7471-453d-87e0-83adf05881b8\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.142607 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-catalog-content\") pod \"19cf4a9f-7471-453d-87e0-83adf05881b8\" (UID: \"19cf4a9f-7471-453d-87e0-83adf05881b8\") " Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.144124 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-utilities" (OuterVolumeSpecName: "utilities") pod "19cf4a9f-7471-453d-87e0-83adf05881b8" (UID: "19cf4a9f-7471-453d-87e0-83adf05881b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.148223 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19cf4a9f-7471-453d-87e0-83adf05881b8-kube-api-access-5942r" (OuterVolumeSpecName: "kube-api-access-5942r") pod "19cf4a9f-7471-453d-87e0-83adf05881b8" (UID: "19cf4a9f-7471-453d-87e0-83adf05881b8"). 
InnerVolumeSpecName "kube-api-access-5942r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.172934 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "19cf4a9f-7471-453d-87e0-83adf05881b8" (UID: "19cf4a9f-7471-453d-87e0-83adf05881b8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.216476 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.216522 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.223473 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.244559 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.244615 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5942r\" (UniqueName: \"kubernetes.io/projected/19cf4a9f-7471-453d-87e0-83adf05881b8-kube-api-access-5942r\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.244637 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19cf4a9f-7471-453d-87e0-83adf05881b8-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.698172 5045 generic.go:334] "Generic (PLEG): container finished" podID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerID="eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644" exitCode=0 Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.698231 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8t7m" event={"ID":"19cf4a9f-7471-453d-87e0-83adf05881b8","Type":"ContainerDied","Data":"eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644"} Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.698299 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8t7m" event={"ID":"19cf4a9f-7471-453d-87e0-83adf05881b8","Type":"ContainerDied","Data":"6e80c4c1cc6440c2462aaf5fc1d7914f47f1e736a89b6fe43965fe4987679413"} Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.698315 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m8t7m" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.698330 5045 scope.go:117] "RemoveContainer" containerID="eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.706824 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6b957d5468-w5qn6" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.733067 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8t7m"] Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.738534 5045 scope.go:117] "RemoveContainer" containerID="7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.742744 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8t7m"] Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.779455 5045 scope.go:117] "RemoveContainer" containerID="5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.810963 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-gmw7c"] Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.837993 5045 scope.go:117] "RemoveContainer" containerID="eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644" Nov 25 23:12:12 crc kubenswrapper[5045]: E1125 23:12:12.842814 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644\": container with ID starting with eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644 not found: ID does not exist" containerID="eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.842866 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644"} err="failed to get container status \"eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644\": rpc error: code = NotFound desc = could not find container \"eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644\": container with ID starting with eb554eac884aaaa9c3f79af36033c6610eb35ba7b65640918109ed9d00452644 not found: ID does not exist" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.842897 5045 scope.go:117] "RemoveContainer" containerID="7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605" Nov 25 23:12:12 crc kubenswrapper[5045]: E1125 23:12:12.843456 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605\": container with ID starting with 7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605 not found: ID does not exist" containerID="7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.843493 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605"} err="failed to get container status \"7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605\": rpc error: code = 
NotFound desc = could not find container \"7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605\": container with ID starting with 7afe272181316a1826f3eb69eab1130c349ad9f8102022e95ae9708809c87605 not found: ID does not exist" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.843517 5045 scope.go:117] "RemoveContainer" containerID="5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7" Nov 25 23:12:12 crc kubenswrapper[5045]: E1125 23:12:12.843761 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7\": container with ID starting with 5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7 not found: ID does not exist" containerID="5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7" Nov 25 23:12:12 crc kubenswrapper[5045]: I1125 23:12:12.843786 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7"} err="failed to get container status \"5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7\": rpc error: code = NotFound desc = could not find container \"5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7\": container with ID starting with 5cad136d01d4ff4b13dede2c25653d9e40f7fe6ee885755e70622b67245f49b7 not found: ID does not exist" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.413646 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" path="/var/lib/kubelet/pods/19cf4a9f-7471-453d-87e0-83adf05881b8/volumes" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.668913 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cw5kr"] Nov 25 23:12:14 crc kubenswrapper[5045]: E1125 23:12:14.669495 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerName="registry-server" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.669529 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerName="registry-server" Nov 25 23:12:14 crc kubenswrapper[5045]: E1125 23:12:14.669580 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerName="extract-utilities" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.669593 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerName="extract-utilities" Nov 25 23:12:14 crc kubenswrapper[5045]: E1125 23:12:14.669612 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerName="extract-content" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.669625 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerName="extract-content" Nov 25 23:12:14 crc kubenswrapper[5045]: E1125 23:12:14.669639 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerName="registry-server" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.669651 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerName="registry-server" Nov 25 23:12:14 crc kubenswrapper[5045]: E1125 23:12:14.669663 5045 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerName="extract-utilities" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.669675 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerName="extract-utilities" Nov 25 23:12:14 crc kubenswrapper[5045]: E1125 23:12:14.669697 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerName="extract-content" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.669737 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerName="extract-content" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.669937 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="19cf4a9f-7471-453d-87e0-83adf05881b8" containerName="registry-server" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.669964 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="091d80d3-9ffe-4888-a892-b2b0726b0ceb" containerName="registry-server" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.671385 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.674613 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cw5kr"] Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.787622 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-utilities\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.787759 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2zvq\" (UniqueName: \"kubernetes.io/projected/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-kube-api-access-w2zvq\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.787904 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-catalog-content\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.889835 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-catalog-content\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.890036 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-utilities\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc 
kubenswrapper[5045]: I1125 23:12:14.890113 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2zvq\" (UniqueName: \"kubernetes.io/projected/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-kube-api-access-w2zvq\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.890960 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-catalog-content\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.892186 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-utilities\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:14 crc kubenswrapper[5045]: I1125 23:12:14.921842 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2zvq\" (UniqueName: \"kubernetes.io/projected/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-kube-api-access-w2zvq\") pod \"certified-operators-cw5kr\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:15 crc kubenswrapper[5045]: I1125 23:12:15.006784 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:15 crc kubenswrapper[5045]: I1125 23:12:15.259266 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cw5kr"] Nov 25 23:12:15 crc kubenswrapper[5045]: W1125 23:12:15.263250 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6bfbce68_72bf_4f0e_9e49_a60817a0e5ca.slice/crio-29768e87c7b7e6e5d20bc1261a20f2aa28cefbc80912a072d4c687f46226020f WatchSource:0}: Error finding container 29768e87c7b7e6e5d20bc1261a20f2aa28cefbc80912a072d4c687f46226020f: Status 404 returned error can't find the container with id 29768e87c7b7e6e5d20bc1261a20f2aa28cefbc80912a072d4c687f46226020f Nov 25 23:12:15 crc kubenswrapper[5045]: I1125 23:12:15.732829 5045 generic.go:334] "Generic (PLEG): container finished" podID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerID="4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b" exitCode=0 Nov 25 23:12:15 crc kubenswrapper[5045]: I1125 23:12:15.732947 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cw5kr" event={"ID":"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca","Type":"ContainerDied","Data":"4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b"} Nov 25 23:12:15 crc kubenswrapper[5045]: I1125 23:12:15.733203 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cw5kr" event={"ID":"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca","Type":"ContainerStarted","Data":"29768e87c7b7e6e5d20bc1261a20f2aa28cefbc80912a072d4c687f46226020f"} Nov 25 23:12:16 crc kubenswrapper[5045]: I1125 23:12:16.742658 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cw5kr" 
event={"ID":"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca","Type":"ContainerStarted","Data":"b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895"} Nov 25 23:12:17 crc kubenswrapper[5045]: I1125 23:12:17.753474 5045 generic.go:334] "Generic (PLEG): container finished" podID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerID="b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895" exitCode=0 Nov 25 23:12:17 crc kubenswrapper[5045]: I1125 23:12:17.753554 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cw5kr" event={"ID":"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca","Type":"ContainerDied","Data":"b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895"} Nov 25 23:12:18 crc kubenswrapper[5045]: I1125 23:12:18.766520 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cw5kr" event={"ID":"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca","Type":"ContainerStarted","Data":"66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e"} Nov 25 23:12:18 crc kubenswrapper[5045]: I1125 23:12:18.790143 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cw5kr" podStartSLOduration=2.346553936 podStartE2EDuration="4.790113092s" podCreationTimestamp="2025-11-25 23:12:14 +0000 UTC" firstStartedPulling="2025-11-25 23:12:15.736193775 +0000 UTC m=+792.093852927" lastFinishedPulling="2025-11-25 23:12:18.179752941 +0000 UTC m=+794.537412083" observedRunningTime="2025-11-25 23:12:18.788409093 +0000 UTC m=+795.146068275" watchObservedRunningTime="2025-11-25 23:12:18.790113092 +0000 UTC m=+795.147772244" Nov 25 23:12:22 crc kubenswrapper[5045]: I1125 23:12:22.549952 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-6vs48" Nov 25 23:12:25 crc kubenswrapper[5045]: I1125 23:12:25.007406 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:25 crc kubenswrapper[5045]: I1125 23:12:25.007516 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:25 crc kubenswrapper[5045]: I1125 23:12:25.085203 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:25 crc kubenswrapper[5045]: I1125 23:12:25.878061 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:25 crc kubenswrapper[5045]: I1125 23:12:25.933466 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cw5kr"] Nov 25 23:12:27 crc kubenswrapper[5045]: I1125 23:12:27.826654 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cw5kr" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerName="registry-server" containerID="cri-o://66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e" gracePeriod=2 Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.189840 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.329844 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-catalog-content\") pod \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.330005 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2zvq\" (UniqueName: \"kubernetes.io/projected/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-kube-api-access-w2zvq\") pod \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.330070 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-utilities\") pod \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\" (UID: \"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca\") " Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.330885 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-utilities" (OuterVolumeSpecName: "utilities") pod "6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" (UID: "6bfbce68-72bf-4f0e-9e49-a60817a0e5ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.334988 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-kube-api-access-w2zvq" (OuterVolumeSpecName: "kube-api-access-w2zvq") pod "6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" (UID: "6bfbce68-72bf-4f0e-9e49-a60817a0e5ca"). InnerVolumeSpecName "kube-api-access-w2zvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.374367 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" (UID: "6bfbce68-72bf-4f0e-9e49-a60817a0e5ca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.431516 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.431544 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.431555 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2zvq\" (UniqueName: \"kubernetes.io/projected/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca-kube-api-access-w2zvq\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.834325 5045 generic.go:334] "Generic (PLEG): container finished" podID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerID="66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e" exitCode=0 Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.834417 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cw5kr" event={"ID":"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca","Type":"ContainerDied","Data":"66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e"} Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.834475 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cw5kr" event={"ID":"6bfbce68-72bf-4f0e-9e49-a60817a0e5ca","Type":"ContainerDied","Data":"29768e87c7b7e6e5d20bc1261a20f2aa28cefbc80912a072d4c687f46226020f"} Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.834476 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cw5kr" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.834494 5045 scope.go:117] "RemoveContainer" containerID="66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.851706 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cw5kr"] Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.856078 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cw5kr"] Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.861271 5045 scope.go:117] "RemoveContainer" containerID="b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.881062 5045 scope.go:117] "RemoveContainer" containerID="4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.912543 5045 scope.go:117] "RemoveContainer" containerID="66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e" Nov 25 23:12:28 crc kubenswrapper[5045]: E1125 23:12:28.913441 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e\": container with ID starting with 66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e not found: ID does not exist" containerID="66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.913504 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e"} err="failed to get container status \"66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e\": rpc error: code = NotFound desc = could not find container \"66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e\": container with ID starting with 66ed884ec6110ff60b62615c5f1bd1e26f177c274edf3024f5876940cbabfb5e not found: ID does not exist" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.913531 5045 scope.go:117] "RemoveContainer" containerID="b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895" Nov 25 23:12:28 crc kubenswrapper[5045]: E1125 23:12:28.914519 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895\": container with ID starting with b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895 not found: ID does not exist" containerID="b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.914589 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895"} err="failed to get container status \"b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895\": rpc error: code = NotFound desc = could not find container \"b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895\": container with ID starting with b0e2b58b200f791ab808ce7c4f282a36f13868c7d15adf64c47d066707a4a895 not found: ID does not exist" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.914632 5045 scope.go:117] "RemoveContainer" 
containerID="4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b" Nov 25 23:12:28 crc kubenswrapper[5045]: E1125 23:12:28.915125 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b\": container with ID starting with 4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b not found: ID does not exist" containerID="4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b" Nov 25 23:12:28 crc kubenswrapper[5045]: I1125 23:12:28.915157 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b"} err="failed to get container status \"4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b\": rpc error: code = NotFound desc = could not find container \"4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b\": container with ID starting with 4776fedf7ba7e62b2f2bea17288844bfa872d6afdc2787008b6de9ecaa69dc4b not found: ID does not exist" Nov 25 23:12:30 crc kubenswrapper[5045]: I1125 23:12:30.404169 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" path="/var/lib/kubelet/pods/6bfbce68-72bf-4f0e-9e49-a60817a0e5ca/volumes" Nov 25 23:12:35 crc kubenswrapper[5045]: I1125 23:12:35.932209 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv"] Nov 25 23:12:35 crc kubenswrapper[5045]: E1125 23:12:35.932951 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerName="extract-content" Nov 25 23:12:35 crc kubenswrapper[5045]: I1125 23:12:35.932966 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerName="extract-content" Nov 25 23:12:35 crc kubenswrapper[5045]: E1125 23:12:35.932982 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerName="extract-utilities" Nov 25 23:12:35 crc kubenswrapper[5045]: I1125 23:12:35.932990 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerName="extract-utilities" Nov 25 23:12:35 crc kubenswrapper[5045]: E1125 23:12:35.933001 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerName="registry-server" Nov 25 23:12:35 crc kubenswrapper[5045]: I1125 23:12:35.933013 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerName="registry-server" Nov 25 23:12:35 crc kubenswrapper[5045]: I1125 23:12:35.933141 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bfbce68-72bf-4f0e-9e49-a60817a0e5ca" containerName="registry-server" Nov 25 23:12:35 crc kubenswrapper[5045]: I1125 23:12:35.934050 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:35 crc kubenswrapper[5045]: I1125 23:12:35.936608 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 23:12:35 crc kubenswrapper[5045]: I1125 23:12:35.944470 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv"] Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.053980 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.054405 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.054440 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2pgd\" (UniqueName: \"kubernetes.io/projected/95a24d51-cffe-4618-b4f0-324815a1848e-kube-api-access-t2pgd\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.155782 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.155866 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2pgd\" (UniqueName: \"kubernetes.io/projected/95a24d51-cffe-4618-b4f0-324815a1848e-kube-api-access-t2pgd\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.155932 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.156539 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.156553 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.189330 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2pgd\" (UniqueName: \"kubernetes.io/projected/95a24d51-cffe-4618-b4f0-324815a1848e-kube-api-access-t2pgd\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.256767 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.762995 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv"] Nov 25 23:12:36 crc kubenswrapper[5045]: I1125 23:12:36.896028 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" event={"ID":"95a24d51-cffe-4618-b4f0-324815a1848e","Type":"ContainerStarted","Data":"523877d0d565781753a237dd87d042df16b6e47638466b3aa1c7537ff26acc46"} Nov 25 23:12:37 crc kubenswrapper[5045]: I1125 23:12:37.889679 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-gmw7c" podUID="2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" containerName="console" containerID="cri-o://11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625" gracePeriod=15 Nov 25 23:12:37 crc kubenswrapper[5045]: I1125 23:12:37.914666 5045 generic.go:334] "Generic (PLEG): container finished" podID="95a24d51-cffe-4618-b4f0-324815a1848e" containerID="9c5213c19d3634f8ab25204d654d5823206ca14a1abe81e0b88d868eb8f55cd2" exitCode=0 Nov 25 23:12:37 crc kubenswrapper[5045]: I1125 23:12:37.914782 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" event={"ID":"95a24d51-cffe-4618-b4f0-324815a1848e","Type":"ContainerDied","Data":"9c5213c19d3634f8ab25204d654d5823206ca14a1abe81e0b88d868eb8f55cd2"} Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.313870 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-gmw7c_2be9acdf-dc72-44e0-8674-ea5ba59cbbaa/console/0.log" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.314134 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.487491 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-serving-cert\") pod \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.487686 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-oauth-serving-cert\") pod \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.487783 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fk57t\" (UniqueName: \"kubernetes.io/projected/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-kube-api-access-fk57t\") pod \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.487890 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-config\") pod \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.487938 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-service-ca\") pod \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.488033 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-oauth-config\") pod \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.488117 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-trusted-ca-bundle\") pod \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\" (UID: \"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa\") " Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.489953 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" (UID: "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.490083 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" (UID: "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.490109 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-service-ca" (OuterVolumeSpecName: "service-ca") pod "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" (UID: "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.490377 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-config" (OuterVolumeSpecName: "console-config") pod "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" (UID: "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.496231 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" (UID: "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.496618 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" (UID: "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.496853 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-kube-api-access-fk57t" (OuterVolumeSpecName: "kube-api-access-fk57t") pod "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" (UID: "2be9acdf-dc72-44e0-8674-ea5ba59cbbaa"). InnerVolumeSpecName "kube-api-access-fk57t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.590092 5045 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.590154 5045 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.590173 5045 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.590192 5045 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.590211 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fk57t\" (UniqueName: \"kubernetes.io/projected/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-kube-api-access-fk57t\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.590231 5045 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.590251 5045 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.923519 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-gmw7c_2be9acdf-dc72-44e0-8674-ea5ba59cbbaa/console/0.log" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.924539 5045 generic.go:334] "Generic (PLEG): container finished" podID="2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" containerID="11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625" exitCode=2 Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.924615 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-gmw7c" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.924627 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gmw7c" event={"ID":"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa","Type":"ContainerDied","Data":"11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625"} Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.925079 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gmw7c" event={"ID":"2be9acdf-dc72-44e0-8674-ea5ba59cbbaa","Type":"ContainerDied","Data":"3f4957bb5cfdb80bb8ab8923d51c7707aecdf1880ce21782a2759daa02b47e29"} Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.925115 5045 scope.go:117] "RemoveContainer" containerID="11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.947058 5045 scope.go:117] "RemoveContainer" containerID="11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625" Nov 25 23:12:38 crc kubenswrapper[5045]: E1125 23:12:38.947611 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625\": container with ID starting with 11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625 not found: ID does not exist" containerID="11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.947668 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625"} err="failed to get container status \"11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625\": rpc error: code = NotFound desc = could not find container \"11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625\": container with ID starting with 11c1ba57d78738f2fcfc0a708136b693c7ea7633863ad97c89af909d3272d625 not found: ID does not exist" Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.978491 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-gmw7c"] Nov 25 23:12:38 crc kubenswrapper[5045]: I1125 23:12:38.984803 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-gmw7c"] Nov 25 23:12:39 crc kubenswrapper[5045]: I1125 23:12:39.933815 5045 generic.go:334] "Generic (PLEG): container finished" podID="95a24d51-cffe-4618-b4f0-324815a1848e" containerID="2701f066163dbb60b32382e89dc897785239e97581d720b05fdf07bcbc2d3072" exitCode=0 Nov 25 23:12:39 crc kubenswrapper[5045]: I1125 23:12:39.933917 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" event={"ID":"95a24d51-cffe-4618-b4f0-324815a1848e","Type":"ContainerDied","Data":"2701f066163dbb60b32382e89dc897785239e97581d720b05fdf07bcbc2d3072"} Nov 25 23:12:40 crc kubenswrapper[5045]: I1125 23:12:40.406442 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" path="/var/lib/kubelet/pods/2be9acdf-dc72-44e0-8674-ea5ba59cbbaa/volumes" Nov 25 23:12:40 crc kubenswrapper[5045]: I1125 23:12:40.946475 5045 generic.go:334] "Generic (PLEG): container finished" podID="95a24d51-cffe-4618-b4f0-324815a1848e" 
containerID="d200cf860e93f866105b60af563fabc9649fb9af217666c98e96138e3073441d" exitCode=0 Nov 25 23:12:40 crc kubenswrapper[5045]: I1125 23:12:40.946540 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" event={"ID":"95a24d51-cffe-4618-b4f0-324815a1848e","Type":"ContainerDied","Data":"d200cf860e93f866105b60af563fabc9649fb9af217666c98e96138e3073441d"} Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.231249 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.338532 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-util\") pod \"95a24d51-cffe-4618-b4f0-324815a1848e\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.338662 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-bundle\") pod \"95a24d51-cffe-4618-b4f0-324815a1848e\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.338747 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2pgd\" (UniqueName: \"kubernetes.io/projected/95a24d51-cffe-4618-b4f0-324815a1848e-kube-api-access-t2pgd\") pod \"95a24d51-cffe-4618-b4f0-324815a1848e\" (UID: \"95a24d51-cffe-4618-b4f0-324815a1848e\") " Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.340945 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-bundle" (OuterVolumeSpecName: "bundle") pod "95a24d51-cffe-4618-b4f0-324815a1848e" (UID: "95a24d51-cffe-4618-b4f0-324815a1848e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.343857 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95a24d51-cffe-4618-b4f0-324815a1848e-kube-api-access-t2pgd" (OuterVolumeSpecName: "kube-api-access-t2pgd") pod "95a24d51-cffe-4618-b4f0-324815a1848e" (UID: "95a24d51-cffe-4618-b4f0-324815a1848e"). InnerVolumeSpecName "kube-api-access-t2pgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.440158 5045 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.440672 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2pgd\" (UniqueName: \"kubernetes.io/projected/95a24d51-cffe-4618-b4f0-324815a1848e-kube-api-access-t2pgd\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.507870 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-util" (OuterVolumeSpecName: "util") pod "95a24d51-cffe-4618-b4f0-324815a1848e" (UID: "95a24d51-cffe-4618-b4f0-324815a1848e"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.541620 5045 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/95a24d51-cffe-4618-b4f0-324815a1848e-util\") on node \"crc\" DevicePath \"\"" Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.964050 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" event={"ID":"95a24d51-cffe-4618-b4f0-324815a1848e","Type":"ContainerDied","Data":"523877d0d565781753a237dd87d042df16b6e47638466b3aa1c7537ff26acc46"} Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.964108 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv" Nov 25 23:12:42 crc kubenswrapper[5045]: I1125 23:12:42.964115 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="523877d0d565781753a237dd87d042df16b6e47638466b3aa1c7537ff26acc46" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.222823 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4"] Nov 25 23:12:51 crc kubenswrapper[5045]: E1125 23:12:51.224369 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95a24d51-cffe-4618-b4f0-324815a1848e" containerName="util" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.224451 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="95a24d51-cffe-4618-b4f0-324815a1848e" containerName="util" Nov 25 23:12:51 crc kubenswrapper[5045]: E1125 23:12:51.224504 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" containerName="console" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.224550 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" containerName="console" Nov 25 23:12:51 crc kubenswrapper[5045]: E1125 23:12:51.224606 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95a24d51-cffe-4618-b4f0-324815a1848e" containerName="extract" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.224682 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="95a24d51-cffe-4618-b4f0-324815a1848e" containerName="extract" Nov 25 23:12:51 crc kubenswrapper[5045]: E1125 23:12:51.224768 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95a24d51-cffe-4618-b4f0-324815a1848e" containerName="pull" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.224830 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="95a24d51-cffe-4618-b4f0-324815a1848e" containerName="pull" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.224987 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="95a24d51-cffe-4618-b4f0-324815a1848e" containerName="extract" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.225050 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="2be9acdf-dc72-44e0-8674-ea5ba59cbbaa" containerName="console" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.225482 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.228315 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.228404 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.228500 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.233871 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.233950 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-7fgnk" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.240841 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4"] Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.346747 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nd6n\" (UniqueName: \"kubernetes.io/projected/211004af-dcb4-4397-bced-fd0c3e3da2a3-kube-api-access-6nd6n\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.346827 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/211004af-dcb4-4397-bced-fd0c3e3da2a3-apiservice-cert\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.346857 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/211004af-dcb4-4397-bced-fd0c3e3da2a3-webhook-cert\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.353993 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d"] Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.354839 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.356340 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.356537 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.356540 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-q5rgl" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.380557 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d"] Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.447820 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/211004af-dcb4-4397-bced-fd0c3e3da2a3-apiservice-cert\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.447869 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/211004af-dcb4-4397-bced-fd0c3e3da2a3-webhook-cert\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.447892 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/aa1a672e-f330-4cef-bf0f-c471b30ac61d-apiservice-cert\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: \"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.447917 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/aa1a672e-f330-4cef-bf0f-c471b30ac61d-webhook-cert\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: \"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.447941 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcrlp\" (UniqueName: \"kubernetes.io/projected/aa1a672e-f330-4cef-bf0f-c471b30ac61d-kube-api-access-gcrlp\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: \"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.448012 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nd6n\" (UniqueName: \"kubernetes.io/projected/211004af-dcb4-4397-bced-fd0c3e3da2a3-kube-api-access-6nd6n\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc 
kubenswrapper[5045]: I1125 23:12:51.453964 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/211004af-dcb4-4397-bced-fd0c3e3da2a3-apiservice-cert\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.453973 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/211004af-dcb4-4397-bced-fd0c3e3da2a3-webhook-cert\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.517764 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nd6n\" (UniqueName: \"kubernetes.io/projected/211004af-dcb4-4397-bced-fd0c3e3da2a3-kube-api-access-6nd6n\") pod \"metallb-operator-controller-manager-796ffbd7cd-282k4\" (UID: \"211004af-dcb4-4397-bced-fd0c3e3da2a3\") " pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.545623 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.548948 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcrlp\" (UniqueName: \"kubernetes.io/projected/aa1a672e-f330-4cef-bf0f-c471b30ac61d-kube-api-access-gcrlp\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: \"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.549077 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/aa1a672e-f330-4cef-bf0f-c471b30ac61d-apiservice-cert\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: \"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.549116 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/aa1a672e-f330-4cef-bf0f-c471b30ac61d-webhook-cert\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: \"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.554300 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/aa1a672e-f330-4cef-bf0f-c471b30ac61d-webhook-cert\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: \"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.554520 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/aa1a672e-f330-4cef-bf0f-c471b30ac61d-apiservice-cert\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: 
\"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.581436 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcrlp\" (UniqueName: \"kubernetes.io/projected/aa1a672e-f330-4cef-bf0f-c471b30ac61d-kube-api-access-gcrlp\") pod \"metallb-operator-webhook-server-7cfb757c46-6g84d\" (UID: \"aa1a672e-f330-4cef-bf0f-c471b30ac61d\") " pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.673762 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.903227 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d"] Nov 25 23:12:51 crc kubenswrapper[5045]: W1125 23:12:51.905642 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa1a672e_f330_4cef_bf0f_c471b30ac61d.slice/crio-c08cb2c822e9c68e5b1ea9be28a9ef797022c6a42d0cf79b5faa483f8d08300d WatchSource:0}: Error finding container c08cb2c822e9c68e5b1ea9be28a9ef797022c6a42d0cf79b5faa483f8d08300d: Status 404 returned error can't find the container with id c08cb2c822e9c68e5b1ea9be28a9ef797022c6a42d0cf79b5faa483f8d08300d Nov 25 23:12:51 crc kubenswrapper[5045]: I1125 23:12:51.953002 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4"] Nov 25 23:12:51 crc kubenswrapper[5045]: W1125 23:12:51.954727 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod211004af_dcb4_4397_bced_fd0c3e3da2a3.slice/crio-4c58f1b848539e738d10868da3247f3db6349340bffa8f4fc6379b69985e0913 WatchSource:0}: Error finding container 4c58f1b848539e738d10868da3247f3db6349340bffa8f4fc6379b69985e0913: Status 404 returned error can't find the container with id 4c58f1b848539e738d10868da3247f3db6349340bffa8f4fc6379b69985e0913 Nov 25 23:12:52 crc kubenswrapper[5045]: I1125 23:12:52.009087 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" event={"ID":"aa1a672e-f330-4cef-bf0f-c471b30ac61d","Type":"ContainerStarted","Data":"c08cb2c822e9c68e5b1ea9be28a9ef797022c6a42d0cf79b5faa483f8d08300d"} Nov 25 23:12:52 crc kubenswrapper[5045]: I1125 23:12:52.010194 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" event={"ID":"211004af-dcb4-4397-bced-fd0c3e3da2a3","Type":"ContainerStarted","Data":"4c58f1b848539e738d10868da3247f3db6349340bffa8f4fc6379b69985e0913"} Nov 25 23:12:58 crc kubenswrapper[5045]: I1125 23:12:58.060001 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" event={"ID":"aa1a672e-f330-4cef-bf0f-c471b30ac61d","Type":"ContainerStarted","Data":"c6804411918e75961d94b78f72d08cd2fcfdfd3eec64e3c002b0cf95601d0436"} Nov 25 23:12:58 crc kubenswrapper[5045]: I1125 23:12:58.060540 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:12:58 crc kubenswrapper[5045]: I1125 23:12:58.062478 5045 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" event={"ID":"211004af-dcb4-4397-bced-fd0c3e3da2a3","Type":"ContainerStarted","Data":"a4f99680690a6020979c3b746c31c7b6eb128917677fe340ea7a66a7129e435f"} Nov 25 23:12:58 crc kubenswrapper[5045]: I1125 23:12:58.062588 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:12:58 crc kubenswrapper[5045]: I1125 23:12:58.080131 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" podStartSLOduration=2.153178717 podStartE2EDuration="7.080111902s" podCreationTimestamp="2025-11-25 23:12:51 +0000 UTC" firstStartedPulling="2025-11-25 23:12:51.907382528 +0000 UTC m=+828.265041640" lastFinishedPulling="2025-11-25 23:12:56.834315713 +0000 UTC m=+833.191974825" observedRunningTime="2025-11-25 23:12:58.075946901 +0000 UTC m=+834.433606063" watchObservedRunningTime="2025-11-25 23:12:58.080111902 +0000 UTC m=+834.437771014" Nov 25 23:12:58 crc kubenswrapper[5045]: I1125 23:12:58.107604 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" podStartSLOduration=2.254415022 podStartE2EDuration="7.107577398s" podCreationTimestamp="2025-11-25 23:12:51 +0000 UTC" firstStartedPulling="2025-11-25 23:12:51.957900543 +0000 UTC m=+828.315559655" lastFinishedPulling="2025-11-25 23:12:56.811062919 +0000 UTC m=+833.168722031" observedRunningTime="2025-11-25 23:12:58.106039994 +0000 UTC m=+834.463699106" watchObservedRunningTime="2025-11-25 23:12:58.107577398 +0000 UTC m=+834.465236550" Nov 25 23:13:11 crc kubenswrapper[5045]: I1125 23:13:11.684054 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7cfb757c46-6g84d" Nov 25 23:13:31 crc kubenswrapper[5045]: I1125 23:13:31.549531 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-796ffbd7cd-282k4" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.316368 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-p6hjd"] Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.320367 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.322387 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm"] Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.323279 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.327147 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.327225 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.327294 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-lhqbk" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.327990 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.333544 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm"] Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.403366 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-sbr7m"] Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.404373 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.406367 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.406449 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.406489 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-bmsv5" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.407019 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.421450 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-4g95c"] Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.422494 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.423828 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.431196 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-4g95c"] Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448451 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-metrics-certs\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448487 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9c4acc23-28a7-432a-8a17-63550727f1a6-cert\") pod \"frr-k8s-webhook-server-6998585d5-5l6nm\" (UID: \"9c4acc23-28a7-432a-8a17-63550727f1a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448506 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fd6n\" (UniqueName: \"kubernetes.io/projected/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-kube-api-access-7fd6n\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448523 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-startup\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448661 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-conf\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448701 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-reloader\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448784 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-sockets\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448878 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-metrics\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.448968 5045 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwf7n\" (UniqueName: \"kubernetes.io/projected/9c4acc23-28a7-432a-8a17-63550727f1a6-kube-api-access-fwf7n\") pod \"frr-k8s-webhook-server-6998585d5-5l6nm\" (UID: \"9c4acc23-28a7-432a-8a17-63550727f1a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550385 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e22db1cb-76eb-4541-8098-95b688ccbe00-metrics-certs\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550461 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcpn9\" (UniqueName: \"kubernetes.io/projected/e22db1cb-76eb-4541-8098-95b688ccbe00-kube-api-access-qcpn9\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550513 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj4kj\" (UniqueName: \"kubernetes.io/projected/fcd4cd5e-d7b9-4666-8eac-781cee36189a-kube-api-access-fj4kj\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550674 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e22db1cb-76eb-4541-8098-95b688ccbe00-cert\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550738 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-metrics-certs\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550795 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9c4acc23-28a7-432a-8a17-63550727f1a6-cert\") pod \"frr-k8s-webhook-server-6998585d5-5l6nm\" (UID: \"9c4acc23-28a7-432a-8a17-63550727f1a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550829 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fd6n\" (UniqueName: \"kubernetes.io/projected/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-kube-api-access-7fd6n\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550862 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-metrics-certs\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 
23:13:32.550895 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-startup\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.550976 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-conf\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551021 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-reloader\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551052 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551106 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fcd4cd5e-d7b9-4666-8eac-781cee36189a-metallb-excludel2\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551206 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-sockets\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551278 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-metrics\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551383 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwf7n\" (UniqueName: \"kubernetes.io/projected/9c4acc23-28a7-432a-8a17-63550727f1a6-kube-api-access-fwf7n\") pod \"frr-k8s-webhook-server-6998585d5-5l6nm\" (UID: \"9c4acc23-28a7-432a-8a17-63550727f1a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551688 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-sockets\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551762 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-metrics\") pod \"frr-k8s-p6hjd\" (UID: 
\"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551879 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-conf\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551954 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-reloader\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.551999 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-frr-startup\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.556427 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-metrics-certs\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.561844 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9c4acc23-28a7-432a-8a17-63550727f1a6-cert\") pod \"frr-k8s-webhook-server-6998585d5-5l6nm\" (UID: \"9c4acc23-28a7-432a-8a17-63550727f1a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.573196 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fd6n\" (UniqueName: \"kubernetes.io/projected/6bab5b2e-6c1e-4464-b7d8-973e76401ba5-kube-api-access-7fd6n\") pod \"frr-k8s-p6hjd\" (UID: \"6bab5b2e-6c1e-4464-b7d8-973e76401ba5\") " pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.577446 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwf7n\" (UniqueName: \"kubernetes.io/projected/9c4acc23-28a7-432a-8a17-63550727f1a6-kube-api-access-fwf7n\") pod \"frr-k8s-webhook-server-6998585d5-5l6nm\" (UID: \"9c4acc23-28a7-432a-8a17-63550727f1a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.637928 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.644448 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.653044 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e22db1cb-76eb-4541-8098-95b688ccbe00-metrics-certs\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.653081 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcpn9\" (UniqueName: \"kubernetes.io/projected/e22db1cb-76eb-4541-8098-95b688ccbe00-kube-api-access-qcpn9\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.653123 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fj4kj\" (UniqueName: \"kubernetes.io/projected/fcd4cd5e-d7b9-4666-8eac-781cee36189a-kube-api-access-fj4kj\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.653155 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e22db1cb-76eb-4541-8098-95b688ccbe00-cert\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.653190 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-metrics-certs\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.653218 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.653240 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fcd4cd5e-d7b9-4666-8eac-781cee36189a-metallb-excludel2\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.653976 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fcd4cd5e-d7b9-4666-8eac-781cee36189a-metallb-excludel2\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: E1125 23:13:32.654917 5045 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 23:13:32 crc kubenswrapper[5045]: E1125 23:13:32.654973 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist podName:fcd4cd5e-d7b9-4666-8eac-781cee36189a nodeName:}" failed. 
No retries permitted until 2025-11-25 23:13:33.154953489 +0000 UTC m=+869.512612611 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist") pod "speaker-sbr7m" (UID: "fcd4cd5e-d7b9-4666-8eac-781cee36189a") : secret "metallb-memberlist" not found Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.656768 5045 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.659459 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e22db1cb-76eb-4541-8098-95b688ccbe00-metrics-certs\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.659776 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-metrics-certs\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.667661 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e22db1cb-76eb-4541-8098-95b688ccbe00-cert\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.672347 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fj4kj\" (UniqueName: \"kubernetes.io/projected/fcd4cd5e-d7b9-4666-8eac-781cee36189a-kube-api-access-fj4kj\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.686523 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcpn9\" (UniqueName: \"kubernetes.io/projected/e22db1cb-76eb-4541-8098-95b688ccbe00-kube-api-access-qcpn9\") pod \"controller-6c7b4b5f48-4g95c\" (UID: \"e22db1cb-76eb-4541-8098-95b688ccbe00\") " pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:32 crc kubenswrapper[5045]: I1125 23:13:32.735206 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:33 crc kubenswrapper[5045]: I1125 23:13:33.076130 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm"] Nov 25 23:13:33 crc kubenswrapper[5045]: W1125 23:13:33.081597 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c4acc23_28a7_432a_8a17_63550727f1a6.slice/crio-e813757427224f5beb793137b3cad7ebcf070cafacf79d095a6219d2a251da22 WatchSource:0}: Error finding container e813757427224f5beb793137b3cad7ebcf070cafacf79d095a6219d2a251da22: Status 404 returned error can't find the container with id e813757427224f5beb793137b3cad7ebcf070cafacf79d095a6219d2a251da22 Nov 25 23:13:33 crc kubenswrapper[5045]: I1125 23:13:33.137402 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-4g95c"] Nov 25 23:13:33 crc kubenswrapper[5045]: W1125 23:13:33.139559 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode22db1cb_76eb_4541_8098_95b688ccbe00.slice/crio-9a79348b3b1763c30f6cdb15d468ee70f37dafd000eef6db1d28cd3ca96b864a WatchSource:0}: Error finding container 9a79348b3b1763c30f6cdb15d468ee70f37dafd000eef6db1d28cd3ca96b864a: Status 404 returned error can't find the container with id 9a79348b3b1763c30f6cdb15d468ee70f37dafd000eef6db1d28cd3ca96b864a Nov 25 23:13:33 crc kubenswrapper[5045]: I1125 23:13:33.159848 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:33 crc kubenswrapper[5045]: E1125 23:13:33.159977 5045 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 23:13:33 crc kubenswrapper[5045]: E1125 23:13:33.160018 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist podName:fcd4cd5e-d7b9-4666-8eac-781cee36189a nodeName:}" failed. No retries permitted until 2025-11-25 23:13:34.160005434 +0000 UTC m=+870.517664546 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist") pod "speaker-sbr7m" (UID: "fcd4cd5e-d7b9-4666-8eac-781cee36189a") : secret "metallb-memberlist" not found Nov 25 23:13:33 crc kubenswrapper[5045]: I1125 23:13:33.306327 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" event={"ID":"9c4acc23-28a7-432a-8a17-63550727f1a6","Type":"ContainerStarted","Data":"e813757427224f5beb793137b3cad7ebcf070cafacf79d095a6219d2a251da22"} Nov 25 23:13:33 crc kubenswrapper[5045]: I1125 23:13:33.308334 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerStarted","Data":"dea396f705b905ed9e5c1c9c97c9a4edb2b6bf40bf79e7a5867b41fd565f65b3"} Nov 25 23:13:33 crc kubenswrapper[5045]: I1125 23:13:33.310024 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-4g95c" event={"ID":"e22db1cb-76eb-4541-8098-95b688ccbe00","Type":"ContainerStarted","Data":"dcd42f642f67efde80a255c5996c12a91af7831d65280d77e571ff5368662a06"} Nov 25 23:13:33 crc kubenswrapper[5045]: I1125 23:13:33.310059 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-4g95c" event={"ID":"e22db1cb-76eb-4541-8098-95b688ccbe00","Type":"ContainerStarted","Data":"9a79348b3b1763c30f6cdb15d468ee70f37dafd000eef6db1d28cd3ca96b864a"} Nov 25 23:13:34 crc kubenswrapper[5045]: I1125 23:13:34.176578 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:34 crc kubenswrapper[5045]: I1125 23:13:34.203173 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcd4cd5e-d7b9-4666-8eac-781cee36189a-memberlist\") pod \"speaker-sbr7m\" (UID: \"fcd4cd5e-d7b9-4666-8eac-781cee36189a\") " pod="metallb-system/speaker-sbr7m" Nov 25 23:13:34 crc kubenswrapper[5045]: I1125 23:13:34.216700 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-sbr7m" Nov 25 23:13:34 crc kubenswrapper[5045]: W1125 23:13:34.270493 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcd4cd5e_d7b9_4666_8eac_781cee36189a.slice/crio-d1a0c5100f23f8199a9c4be20dfa731b6d7a7872411d340f61832d8b381bf864 WatchSource:0}: Error finding container d1a0c5100f23f8199a9c4be20dfa731b6d7a7872411d340f61832d8b381bf864: Status 404 returned error can't find the container with id d1a0c5100f23f8199a9c4be20dfa731b6d7a7872411d340f61832d8b381bf864 Nov 25 23:13:34 crc kubenswrapper[5045]: I1125 23:13:34.316400 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-4g95c" event={"ID":"e22db1cb-76eb-4541-8098-95b688ccbe00","Type":"ContainerStarted","Data":"b4c712c0f0348d30c17e942d46203b2fdc5611f87b81dbad5f1747b8f7f25016"} Nov 25 23:13:34 crc kubenswrapper[5045]: I1125 23:13:34.317339 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:34 crc kubenswrapper[5045]: I1125 23:13:34.320396 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sbr7m" event={"ID":"fcd4cd5e-d7b9-4666-8eac-781cee36189a","Type":"ContainerStarted","Data":"d1a0c5100f23f8199a9c4be20dfa731b6d7a7872411d340f61832d8b381bf864"} Nov 25 23:13:34 crc kubenswrapper[5045]: I1125 23:13:34.428361 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-4g95c" podStartSLOduration=2.428346575 podStartE2EDuration="2.428346575s" podCreationTimestamp="2025-11-25 23:13:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:13:34.349091742 +0000 UTC m=+870.706750854" watchObservedRunningTime="2025-11-25 23:13:34.428346575 +0000 UTC m=+870.786005687" Nov 25 23:13:35 crc kubenswrapper[5045]: I1125 23:13:35.327174 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sbr7m" event={"ID":"fcd4cd5e-d7b9-4666-8eac-781cee36189a","Type":"ContainerStarted","Data":"ddd8c08ea540e89b18aa1bce418d671dfcfc5fcdeb9ea4c2411bd85c1a5cb5a4"} Nov 25 23:13:35 crc kubenswrapper[5045]: I1125 23:13:35.327532 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sbr7m" event={"ID":"fcd4cd5e-d7b9-4666-8eac-781cee36189a","Type":"ContainerStarted","Data":"dfa8d849deb5a607fd31a460003ad7fa082081add63c545eab9839516af0cd6d"} Nov 25 23:13:35 crc kubenswrapper[5045]: I1125 23:13:35.327557 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-sbr7m" Nov 25 23:13:35 crc kubenswrapper[5045]: I1125 23:13:35.345353 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-sbr7m" podStartSLOduration=3.3453339509999998 podStartE2EDuration="3.345333951s" podCreationTimestamp="2025-11-25 23:13:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:13:35.344893908 +0000 UTC m=+871.702553020" watchObservedRunningTime="2025-11-25 23:13:35.345333951 +0000 UTC m=+871.702993053" Nov 25 23:13:43 crc kubenswrapper[5045]: I1125 23:13:43.403912 5045 generic.go:334] "Generic (PLEG): container finished" podID="6bab5b2e-6c1e-4464-b7d8-973e76401ba5" 
containerID="d721207bc91eff6207353383cd02d43f3f5eb30f8a19c8a9ac90f4dd5f8b19e8" exitCode=0 Nov 25 23:13:43 crc kubenswrapper[5045]: I1125 23:13:43.403991 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerDied","Data":"d721207bc91eff6207353383cd02d43f3f5eb30f8a19c8a9ac90f4dd5f8b19e8"} Nov 25 23:13:43 crc kubenswrapper[5045]: I1125 23:13:43.409019 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" event={"ID":"9c4acc23-28a7-432a-8a17-63550727f1a6","Type":"ContainerStarted","Data":"a3990d7a1ddca6707931f9a005f4e7ac269c3c7a91e74ee1f4eb1bbf873af6be"} Nov 25 23:13:43 crc kubenswrapper[5045]: I1125 23:13:43.409327 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:43 crc kubenswrapper[5045]: I1125 23:13:43.461917 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" podStartSLOduration=2.151986139 podStartE2EDuration="11.461885723s" podCreationTimestamp="2025-11-25 23:13:32 +0000 UTC" firstStartedPulling="2025-11-25 23:13:33.083959453 +0000 UTC m=+869.441618575" lastFinishedPulling="2025-11-25 23:13:42.393859037 +0000 UTC m=+878.751518159" observedRunningTime="2025-11-25 23:13:43.460461182 +0000 UTC m=+879.818120334" watchObservedRunningTime="2025-11-25 23:13:43.461885723 +0000 UTC m=+879.819544885" Nov 25 23:13:44 crc kubenswrapper[5045]: I1125 23:13:44.221534 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-sbr7m" Nov 25 23:13:44 crc kubenswrapper[5045]: I1125 23:13:44.429186 5045 generic.go:334] "Generic (PLEG): container finished" podID="6bab5b2e-6c1e-4464-b7d8-973e76401ba5" containerID="d52536363fbcd6fe27e18e785f9f87abe03ab3a03a262d649e04c06af5ea48a4" exitCode=0 Nov 25 23:13:44 crc kubenswrapper[5045]: I1125 23:13:44.429319 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerDied","Data":"d52536363fbcd6fe27e18e785f9f87abe03ab3a03a262d649e04c06af5ea48a4"} Nov 25 23:13:45 crc kubenswrapper[5045]: I1125 23:13:45.440697 5045 generic.go:334] "Generic (PLEG): container finished" podID="6bab5b2e-6c1e-4464-b7d8-973e76401ba5" containerID="e617e28739c0647677932ecbb14c08c3eca8d06f8711e3b3edcebd995cfb6591" exitCode=0 Nov 25 23:13:45 crc kubenswrapper[5045]: I1125 23:13:45.440848 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerDied","Data":"e617e28739c0647677932ecbb14c08c3eca8d06f8711e3b3edcebd995cfb6591"} Nov 25 23:13:46 crc kubenswrapper[5045]: I1125 23:13:46.450478 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerStarted","Data":"d85ff19c411c779ab2a85edfb5faae3ef84a0c78a6b40956918f31013ec0e12d"} Nov 25 23:13:46 crc kubenswrapper[5045]: I1125 23:13:46.450840 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerStarted","Data":"3dcfa20125d7e15864c898904b541f52ff9467f63a32a35dc38491d5a1f55ae9"} Nov 25 23:13:46 crc kubenswrapper[5045]: I1125 23:13:46.450857 5045 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerStarted","Data":"9241be4b741da3406e1a7effb11ab884e31c687feaa91961ccc48929393b2a5a"} Nov 25 23:13:46 crc kubenswrapper[5045]: I1125 23:13:46.450869 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerStarted","Data":"289392cdae853d5bef20c54b50bfde9d47f414c296195a626ddef1b673664d1f"} Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.200585 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-lx6lk"] Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.201753 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lx6lk" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.203984 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.204885 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-gphqw" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.209739 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.217533 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lx6lk"] Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.306768 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kwlq\" (UniqueName: \"kubernetes.io/projected/7f3e1286-dfab-4d98-a782-dc7001d31854-kube-api-access-5kwlq\") pod \"openstack-operator-index-lx6lk\" (UID: \"7f3e1286-dfab-4d98-a782-dc7001d31854\") " pod="openstack-operators/openstack-operator-index-lx6lk" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.407699 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kwlq\" (UniqueName: \"kubernetes.io/projected/7f3e1286-dfab-4d98-a782-dc7001d31854-kube-api-access-5kwlq\") pod \"openstack-operator-index-lx6lk\" (UID: \"7f3e1286-dfab-4d98-a782-dc7001d31854\") " pod="openstack-operators/openstack-operator-index-lx6lk" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.433703 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kwlq\" (UniqueName: \"kubernetes.io/projected/7f3e1286-dfab-4d98-a782-dc7001d31854-kube-api-access-5kwlq\") pod \"openstack-operator-index-lx6lk\" (UID: \"7f3e1286-dfab-4d98-a782-dc7001d31854\") " pod="openstack-operators/openstack-operator-index-lx6lk" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.459477 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerStarted","Data":"acd9d5f3096075ab99e92feffe9e55ca12c8df9d840175f5b806135b49157160"} Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.459515 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-p6hjd" event={"ID":"6bab5b2e-6c1e-4464-b7d8-973e76401ba5","Type":"ContainerStarted","Data":"40dee5b829d98252217e96a678366040f043a7839968ee8e170e320fa34cde27"} Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.459674 5045 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.480817 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-p6hjd" podStartSLOduration=5.96537772 podStartE2EDuration="15.480802561s" podCreationTimestamp="2025-11-25 23:13:32 +0000 UTC" firstStartedPulling="2025-11-25 23:13:32.916615861 +0000 UTC m=+869.274274973" lastFinishedPulling="2025-11-25 23:13:42.432040692 +0000 UTC m=+878.789699814" observedRunningTime="2025-11-25 23:13:47.477630849 +0000 UTC m=+883.835289981" watchObservedRunningTime="2025-11-25 23:13:47.480802561 +0000 UTC m=+883.838461673" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.525889 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lx6lk" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.639148 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.679441 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-p6hjd" Nov 25 23:13:47 crc kubenswrapper[5045]: I1125 23:13:47.949335 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lx6lk"] Nov 25 23:13:47 crc kubenswrapper[5045]: W1125 23:13:47.957033 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f3e1286_dfab_4d98_a782_dc7001d31854.slice/crio-7a96422a912bc45ef36972d583178276ffd0d40e5a10f940ce0a25b3e353fb63 WatchSource:0}: Error finding container 7a96422a912bc45ef36972d583178276ffd0d40e5a10f940ce0a25b3e353fb63: Status 404 returned error can't find the container with id 7a96422a912bc45ef36972d583178276ffd0d40e5a10f940ce0a25b3e353fb63 Nov 25 23:13:48 crc kubenswrapper[5045]: I1125 23:13:48.469926 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lx6lk" event={"ID":"7f3e1286-dfab-4d98-a782-dc7001d31854","Type":"ContainerStarted","Data":"7a96422a912bc45ef36972d583178276ffd0d40e5a10f940ce0a25b3e353fb63"} Nov 25 23:13:50 crc kubenswrapper[5045]: I1125 23:13:50.567374 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-lx6lk"] Nov 25 23:13:51 crc kubenswrapper[5045]: I1125 23:13:51.172493 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-bl9nc"] Nov 25 23:13:51 crc kubenswrapper[5045]: I1125 23:13:51.173783 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-bl9nc" Nov 25 23:13:51 crc kubenswrapper[5045]: I1125 23:13:51.176803 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-bl9nc"] Nov 25 23:13:51 crc kubenswrapper[5045]: I1125 23:13:51.269643 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dllch\" (UniqueName: \"kubernetes.io/projected/59064a60-9836-4862-8b6b-ba68ce13975d-kube-api-access-dllch\") pod \"openstack-operator-index-bl9nc\" (UID: \"59064a60-9836-4862-8b6b-ba68ce13975d\") " pod="openstack-operators/openstack-operator-index-bl9nc" Nov 25 23:13:51 crc kubenswrapper[5045]: I1125 23:13:51.371151 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dllch\" (UniqueName: \"kubernetes.io/projected/59064a60-9836-4862-8b6b-ba68ce13975d-kube-api-access-dllch\") pod \"openstack-operator-index-bl9nc\" (UID: \"59064a60-9836-4862-8b6b-ba68ce13975d\") " pod="openstack-operators/openstack-operator-index-bl9nc" Nov 25 23:13:51 crc kubenswrapper[5045]: I1125 23:13:51.426072 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dllch\" (UniqueName: \"kubernetes.io/projected/59064a60-9836-4862-8b6b-ba68ce13975d-kube-api-access-dllch\") pod \"openstack-operator-index-bl9nc\" (UID: \"59064a60-9836-4862-8b6b-ba68ce13975d\") " pod="openstack-operators/openstack-operator-index-bl9nc" Nov 25 23:13:51 crc kubenswrapper[5045]: I1125 23:13:51.510173 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-bl9nc" Nov 25 23:13:52 crc kubenswrapper[5045]: I1125 23:13:52.655505 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5l6nm" Nov 25 23:13:52 crc kubenswrapper[5045]: I1125 23:13:52.740488 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-4g95c" Nov 25 23:13:54 crc kubenswrapper[5045]: I1125 23:13:54.376444 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-bl9nc"] Nov 25 23:13:54 crc kubenswrapper[5045]: W1125 23:13:54.424682 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59064a60_9836_4862_8b6b_ba68ce13975d.slice/crio-d9bec84d6a35192d2181afe1025cb8ac377a8418ce33bb055412ebe55b81ca75 WatchSource:0}: Error finding container d9bec84d6a35192d2181afe1025cb8ac377a8418ce33bb055412ebe55b81ca75: Status 404 returned error can't find the container with id d9bec84d6a35192d2181afe1025cb8ac377a8418ce33bb055412ebe55b81ca75 Nov 25 23:13:54 crc kubenswrapper[5045]: I1125 23:13:54.511979 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-bl9nc" event={"ID":"59064a60-9836-4862-8b6b-ba68ce13975d","Type":"ContainerStarted","Data":"d9bec84d6a35192d2181afe1025cb8ac377a8418ce33bb055412ebe55b81ca75"} Nov 25 23:13:56 crc kubenswrapper[5045]: I1125 23:13:56.530264 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lx6lk" event={"ID":"7f3e1286-dfab-4d98-a782-dc7001d31854","Type":"ContainerStarted","Data":"35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83"} Nov 25 23:13:56 crc kubenswrapper[5045]: I1125 23:13:56.530373 5045 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-lx6lk" podUID="7f3e1286-dfab-4d98-a782-dc7001d31854" containerName="registry-server" containerID="cri-o://35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83" gracePeriod=2 Nov 25 23:13:56 crc kubenswrapper[5045]: I1125 23:13:56.532869 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-bl9nc" event={"ID":"59064a60-9836-4862-8b6b-ba68ce13975d","Type":"ContainerStarted","Data":"b1cceed3bb9f70ba59ffd8d1934a513eb39f907b80192dc3664a966f88a7ff31"} Nov 25 23:13:56 crc kubenswrapper[5045]: I1125 23:13:56.547967 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-lx6lk" podStartSLOduration=2.11951671 podStartE2EDuration="9.547945979s" podCreationTimestamp="2025-11-25 23:13:47 +0000 UTC" firstStartedPulling="2025-11-25 23:13:47.958651429 +0000 UTC m=+884.316310531" lastFinishedPulling="2025-11-25 23:13:55.387080668 +0000 UTC m=+891.744739800" observedRunningTime="2025-11-25 23:13:56.546192049 +0000 UTC m=+892.903851171" watchObservedRunningTime="2025-11-25 23:13:56.547945979 +0000 UTC m=+892.905605091" Nov 25 23:13:56 crc kubenswrapper[5045]: I1125 23:13:56.567870 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-bl9nc" podStartSLOduration=4.604991993 podStartE2EDuration="5.567843745s" podCreationTimestamp="2025-11-25 23:13:51 +0000 UTC" firstStartedPulling="2025-11-25 23:13:54.427077598 +0000 UTC m=+890.784736710" lastFinishedPulling="2025-11-25 23:13:55.38992934 +0000 UTC m=+891.747588462" observedRunningTime="2025-11-25 23:13:56.56213545 +0000 UTC m=+892.919794582" watchObservedRunningTime="2025-11-25 23:13:56.567843745 +0000 UTC m=+892.925502897" Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.048941 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lx6lk" Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.214833 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kwlq\" (UniqueName: \"kubernetes.io/projected/7f3e1286-dfab-4d98-a782-dc7001d31854-kube-api-access-5kwlq\") pod \"7f3e1286-dfab-4d98-a782-dc7001d31854\" (UID: \"7f3e1286-dfab-4d98-a782-dc7001d31854\") " Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.219486 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f3e1286-dfab-4d98-a782-dc7001d31854-kube-api-access-5kwlq" (OuterVolumeSpecName: "kube-api-access-5kwlq") pod "7f3e1286-dfab-4d98-a782-dc7001d31854" (UID: "7f3e1286-dfab-4d98-a782-dc7001d31854"). InnerVolumeSpecName "kube-api-access-5kwlq". 
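"Killing container with a grace period ... gracePeriod=2" above is the standard termination sequence: send SIGTERM, wait up to the grace period, then SIGKILL whatever is still running. A self-contained sketch of that pattern against an ordinary process (illustrative only; the kubelet drives this through the CRI StopContainer call, not os/exec):

package main

import (
	"os/exec"
	"syscall"
	"time"
)

// Stop a process the way a 2-second grace period stops a container:
// SIGTERM first, SIGKILL if it has not exited when the period expires.
func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	cmd.Process.Signal(syscall.SIGTERM)
	select {
	case <-done: // exited within the grace period
	case <-time.After(2 * time.Second): // gracePeriod=2, as in the log
		cmd.Process.Kill()
		<-done
	}
}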
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.316574 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kwlq\" (UniqueName: \"kubernetes.io/projected/7f3e1286-dfab-4d98-a782-dc7001d31854-kube-api-access-5kwlq\") on node \"crc\" DevicePath \"\"" Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.543494 5045 generic.go:334] "Generic (PLEG): container finished" podID="7f3e1286-dfab-4d98-a782-dc7001d31854" containerID="35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83" exitCode=0 Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.543613 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lx6lk" Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.543649 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lx6lk" event={"ID":"7f3e1286-dfab-4d98-a782-dc7001d31854","Type":"ContainerDied","Data":"35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83"} Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.544499 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lx6lk" event={"ID":"7f3e1286-dfab-4d98-a782-dc7001d31854","Type":"ContainerDied","Data":"7a96422a912bc45ef36972d583178276ffd0d40e5a10f940ce0a25b3e353fb63"} Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.544563 5045 scope.go:117] "RemoveContainer" containerID="35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83" Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.575420 5045 scope.go:117] "RemoveContainer" containerID="35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83" Nov 25 23:13:57 crc kubenswrapper[5045]: E1125 23:13:57.575854 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83\": container with ID starting with 35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83 not found: ID does not exist" containerID="35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83" Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.575890 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83"} err="failed to get container status \"35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83\": rpc error: code = NotFound desc = could not find container \"35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83\": container with ID starting with 35978c13d24f5a1dbcb062b6c9f3cf876cb4723845c2b044771aac8bc8ae9e83 not found: ID does not exist" Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.586893 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-lx6lk"] Nov 25 23:13:57 crc kubenswrapper[5045]: I1125 23:13:57.592843 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-lx6lk"] Nov 25 23:13:58 crc kubenswrapper[5045]: I1125 23:13:58.406551 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f3e1286-dfab-4d98-a782-dc7001d31854" path="/var/lib/kubelet/pods/7f3e1286-dfab-4d98-a782-dc7001d31854/volumes" Nov 25 23:14:00 crc kubenswrapper[5045]: I1125 23:14:00.540687 5045 patch_prober.go:28] 
Nov 25 23:14:00 crc kubenswrapper[5045]: I1125 23:14:00.540830 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 23:14:01 crc kubenswrapper[5045]: I1125 23:14:01.510579 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-bl9nc"
Nov 25 23:14:01 crc kubenswrapper[5045]: I1125 23:14:01.510667 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-bl9nc"
Nov 25 23:14:01 crc kubenswrapper[5045]: I1125 23:14:01.543011 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-bl9nc"
Nov 25 23:14:01 crc kubenswrapper[5045]: I1125 23:14:01.607249 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-bl9nc"
Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.643212 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-p6hjd"
Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.821897 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92"]
Nov 25 23:14:02 crc kubenswrapper[5045]: E1125 23:14:02.822253 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f3e1286-dfab-4d98-a782-dc7001d31854" containerName="registry-server"
Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.822278 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f3e1286-dfab-4d98-a782-dc7001d31854" containerName="registry-server"
Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.822497 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f3e1286-dfab-4d98-a782-dc7001d31854" containerName="registry-server"
Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.824356 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92"
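The machine-config-daemon liveness failure above is a plain HTTP GET that could not connect: nothing was listening on 127.0.0.1:8798 at that moment, so the dial was refused. An HTTP probe reduces to something like the following sketch (the 1-second timeout is an assumption matching kubelet's default probe timeout; kubelet also treats any 2xx/3xx status as success):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// Approximation of an HTTP liveness probe: a connect error or a non-2xx/3xx
// status is a failure, exactly the "connection refused" outcome logged above.
func probe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy: HTTP %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:8798/health"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}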
Need to start a new one" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.827216 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-qh9fm" Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.842654 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92"] Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.902269 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-bundle\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.902435 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxxtt\" (UniqueName: \"kubernetes.io/projected/a95c5e20-6f36-41d3-a164-1768574075ee-kube-api-access-bxxtt\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:02 crc kubenswrapper[5045]: I1125 23:14:02.902664 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-util\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.004612 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxxtt\" (UniqueName: \"kubernetes.io/projected/a95c5e20-6f36-41d3-a164-1768574075ee-kube-api-access-bxxtt\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.004691 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-util\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.004790 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-bundle\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.005308 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-bundle\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.005775 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-util\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.028961 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxxtt\" (UniqueName: \"kubernetes.io/projected/a95c5e20-6f36-41d3-a164-1768574075ee-kube-api-access-bxxtt\") pod \"1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.149273 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.442007 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92"] Nov 25 23:14:03 crc kubenswrapper[5045]: I1125 23:14:03.589164 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" event={"ID":"a95c5e20-6f36-41d3-a164-1768574075ee","Type":"ContainerStarted","Data":"4f82db0df5f4e8b6dc3b1e135d1b7c777ecac8e340054f76673223758c5d2eea"} Nov 25 23:14:04 crc kubenswrapper[5045]: I1125 23:14:04.599207 5045 generic.go:334] "Generic (PLEG): container finished" podID="a95c5e20-6f36-41d3-a164-1768574075ee" containerID="b58e3376bb5323d50374f653d09cb59ed3ab18aa81c3d8e4270d6acae9b0f5db" exitCode=0 Nov 25 23:14:04 crc kubenswrapper[5045]: I1125 23:14:04.599299 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" event={"ID":"a95c5e20-6f36-41d3-a164-1768574075ee","Type":"ContainerDied","Data":"b58e3376bb5323d50374f653d09cb59ed3ab18aa81c3d8e4270d6acae9b0f5db"} Nov 25 23:14:06 crc kubenswrapper[5045]: I1125 23:14:06.618517 5045 generic.go:334] "Generic (PLEG): container finished" podID="a95c5e20-6f36-41d3-a164-1768574075ee" containerID="475b5186f42b137561bd108d58846c82f4fe0b329c8b65f7ecc6e80c2893edeb" exitCode=0 Nov 25 23:14:06 crc kubenswrapper[5045]: I1125 23:14:06.618609 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" event={"ID":"a95c5e20-6f36-41d3-a164-1768574075ee","Type":"ContainerDied","Data":"475b5186f42b137561bd108d58846c82f4fe0b329c8b65f7ecc6e80c2893edeb"} Nov 25 23:14:07 crc kubenswrapper[5045]: I1125 23:14:07.628001 5045 generic.go:334] "Generic (PLEG): container finished" podID="a95c5e20-6f36-41d3-a164-1768574075ee" containerID="7647c4cc5b7cfc636ab6ce9fc253d5b1424edd3266f06edaa096826dcb7671d5" exitCode=0 Nov 25 23:14:07 crc kubenswrapper[5045]: I1125 23:14:07.628128 5045 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" event={"ID":"a95c5e20-6f36-41d3-a164-1768574075ee","Type":"ContainerDied","Data":"7647c4cc5b7cfc636ab6ce9fc253d5b1424edd3266f06edaa096826dcb7671d5"} Nov 25 23:14:08 crc kubenswrapper[5045]: I1125 23:14:08.930065 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.110350 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-util\") pod \"a95c5e20-6f36-41d3-a164-1768574075ee\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.110529 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxxtt\" (UniqueName: \"kubernetes.io/projected/a95c5e20-6f36-41d3-a164-1768574075ee-kube-api-access-bxxtt\") pod \"a95c5e20-6f36-41d3-a164-1768574075ee\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.110619 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-bundle\") pod \"a95c5e20-6f36-41d3-a164-1768574075ee\" (UID: \"a95c5e20-6f36-41d3-a164-1768574075ee\") " Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.111436 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-bundle" (OuterVolumeSpecName: "bundle") pod "a95c5e20-6f36-41d3-a164-1768574075ee" (UID: "a95c5e20-6f36-41d3-a164-1768574075ee"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.118957 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a95c5e20-6f36-41d3-a164-1768574075ee-kube-api-access-bxxtt" (OuterVolumeSpecName: "kube-api-access-bxxtt") pod "a95c5e20-6f36-41d3-a164-1768574075ee" (UID: "a95c5e20-6f36-41d3-a164-1768574075ee"). InnerVolumeSpecName "kube-api-access-bxxtt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.124008 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-util" (OuterVolumeSpecName: "util") pod "a95c5e20-6f36-41d3-a164-1768574075ee" (UID: "a95c5e20-6f36-41d3-a164-1768574075ee"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.216196 5045 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.216246 5045 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a95c5e20-6f36-41d3-a164-1768574075ee-util\") on node \"crc\" DevicePath \"\"" Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.216272 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxxtt\" (UniqueName: \"kubernetes.io/projected/a95c5e20-6f36-41d3-a164-1768574075ee-kube-api-access-bxxtt\") on node \"crc\" DevicePath \"\"" Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.646628 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" event={"ID":"a95c5e20-6f36-41d3-a164-1768574075ee","Type":"ContainerDied","Data":"4f82db0df5f4e8b6dc3b1e135d1b7c777ecac8e340054f76673223758c5d2eea"} Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.646950 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f82db0df5f4e8b6dc3b1e135d1b7c777ecac8e340054f76673223758c5d2eea" Nov 25 23:14:09 crc kubenswrapper[5045]: I1125 23:14:09.646830 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.027732 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd"] Nov 25 23:14:15 crc kubenswrapper[5045]: E1125 23:14:15.028225 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a95c5e20-6f36-41d3-a164-1768574075ee" containerName="extract" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.028239 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a95c5e20-6f36-41d3-a164-1768574075ee" containerName="extract" Nov 25 23:14:15 crc kubenswrapper[5045]: E1125 23:14:15.028264 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a95c5e20-6f36-41d3-a164-1768574075ee" containerName="util" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.028272 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a95c5e20-6f36-41d3-a164-1768574075ee" containerName="util" Nov 25 23:14:15 crc kubenswrapper[5045]: E1125 23:14:15.028282 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a95c5e20-6f36-41d3-a164-1768574075ee" containerName="pull" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.028290 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a95c5e20-6f36-41d3-a164-1768574075ee" containerName="pull" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.028414 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a95c5e20-6f36-41d3-a164-1768574075ee" containerName="extract" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.034384 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.036441 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-dqt42" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.061888 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd"] Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.107568 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nxw5\" (UniqueName: \"kubernetes.io/projected/fc249ddc-e18a-4677-8e37-7b7d449876d9-kube-api-access-5nxw5\") pod \"openstack-operator-controller-operator-7d45d649c4-bccwd\" (UID: \"fc249ddc-e18a-4677-8e37-7b7d449876d9\") " pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.208572 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nxw5\" (UniqueName: \"kubernetes.io/projected/fc249ddc-e18a-4677-8e37-7b7d449876d9-kube-api-access-5nxw5\") pod \"openstack-operator-controller-operator-7d45d649c4-bccwd\" (UID: \"fc249ddc-e18a-4677-8e37-7b7d449876d9\") " pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.230126 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nxw5\" (UniqueName: \"kubernetes.io/projected/fc249ddc-e18a-4677-8e37-7b7d449876d9-kube-api-access-5nxw5\") pod \"openstack-operator-controller-operator-7d45d649c4-bccwd\" (UID: \"fc249ddc-e18a-4677-8e37-7b7d449876d9\") " pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.356479 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.649791 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd"] Nov 25 23:14:15 crc kubenswrapper[5045]: W1125 23:14:15.667925 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc249ddc_e18a_4677_8e37_7b7d449876d9.slice/crio-42f24564a4247f9070de7106322cba6834bd2f4608d23b4294b114eb5adb2c09 WatchSource:0}: Error finding container 42f24564a4247f9070de7106322cba6834bd2f4608d23b4294b114eb5adb2c09: Status 404 returned error can't find the container with id 42f24564a4247f9070de7106322cba6834bd2f4608d23b4294b114eb5adb2c09 Nov 25 23:14:15 crc kubenswrapper[5045]: I1125 23:14:15.684127 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" event={"ID":"fc249ddc-e18a-4677-8e37-7b7d449876d9","Type":"ContainerStarted","Data":"42f24564a4247f9070de7106322cba6834bd2f4608d23b4294b114eb5adb2c09"} Nov 25 23:14:22 crc kubenswrapper[5045]: I1125 23:14:22.730360 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" event={"ID":"fc249ddc-e18a-4677-8e37-7b7d449876d9","Type":"ContainerStarted","Data":"9749b5d9edfc6ab0184a94971819163990ab346ae1cb61526a33475f288ebe9a"} Nov 25 23:14:22 crc kubenswrapper[5045]: I1125 23:14:22.731069 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" Nov 25 23:14:22 crc kubenswrapper[5045]: I1125 23:14:22.783693 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" podStartSLOduration=2.096230547 podStartE2EDuration="8.783662204s" podCreationTimestamp="2025-11-25 23:14:14 +0000 UTC" firstStartedPulling="2025-11-25 23:14:15.669783495 +0000 UTC m=+912.027442617" lastFinishedPulling="2025-11-25 23:14:22.357215122 +0000 UTC m=+918.714874274" observedRunningTime="2025-11-25 23:14:22.778243261 +0000 UTC m=+919.135902473" watchObservedRunningTime="2025-11-25 23:14:22.783662204 +0000 UTC m=+919.141321366" Nov 25 23:14:30 crc kubenswrapper[5045]: I1125 23:14:30.540947 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:14:30 crc kubenswrapper[5045]: I1125 23:14:30.541422 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:14:35 crc kubenswrapper[5045]: I1125 23:14:35.360706 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7d45d649c4-bccwd" Nov 25 23:14:39 crc kubenswrapper[5045]: I1125 23:14:39.881016 5045 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-zpf6z"] Nov 25 23:14:39 crc kubenswrapper[5045]: I1125 23:14:39.887284 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:39 crc kubenswrapper[5045]: I1125 23:14:39.900586 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zpf6z"] Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.001612 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4whz\" (UniqueName: \"kubernetes.io/projected/95300418-dfba-43ea-a3c3-f212d6c51e32-kube-api-access-p4whz\") pod \"community-operators-zpf6z\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.001746 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-utilities\") pod \"community-operators-zpf6z\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.002051 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-catalog-content\") pod \"community-operators-zpf6z\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.102965 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4whz\" (UniqueName: \"kubernetes.io/projected/95300418-dfba-43ea-a3c3-f212d6c51e32-kube-api-access-p4whz\") pod \"community-operators-zpf6z\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.103061 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-utilities\") pod \"community-operators-zpf6z\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.103084 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-catalog-content\") pod \"community-operators-zpf6z\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.103540 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-catalog-content\") pod \"community-operators-zpf6z\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.103826 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-utilities\") pod \"community-operators-zpf6z\" (UID: 
\"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.136561 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4whz\" (UniqueName: \"kubernetes.io/projected/95300418-dfba-43ea-a3c3-f212d6c51e32-kube-api-access-p4whz\") pod \"community-operators-zpf6z\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.216924 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.503286 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zpf6z"] Nov 25 23:14:40 crc kubenswrapper[5045]: I1125 23:14:40.870930 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zpf6z" event={"ID":"95300418-dfba-43ea-a3c3-f212d6c51e32","Type":"ContainerStarted","Data":"8648aa6deb5aa99ca27ee5534da82180bad0fd95363447bf9124ae51a1cbf366"} Nov 25 23:14:41 crc kubenswrapper[5045]: I1125 23:14:41.878036 5045 generic.go:334] "Generic (PLEG): container finished" podID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerID="b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d" exitCode=0 Nov 25 23:14:41 crc kubenswrapper[5045]: I1125 23:14:41.878152 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zpf6z" event={"ID":"95300418-dfba-43ea-a3c3-f212d6c51e32","Type":"ContainerDied","Data":"b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d"} Nov 25 23:14:43 crc kubenswrapper[5045]: I1125 23:14:43.891120 5045 generic.go:334] "Generic (PLEG): container finished" podID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerID="0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a" exitCode=0 Nov 25 23:14:43 crc kubenswrapper[5045]: I1125 23:14:43.892315 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zpf6z" event={"ID":"95300418-dfba-43ea-a3c3-f212d6c51e32","Type":"ContainerDied","Data":"0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a"} Nov 25 23:14:44 crc kubenswrapper[5045]: I1125 23:14:44.900077 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zpf6z" event={"ID":"95300418-dfba-43ea-a3c3-f212d6c51e32","Type":"ContainerStarted","Data":"d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64"} Nov 25 23:14:44 crc kubenswrapper[5045]: I1125 23:14:44.928155 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zpf6z" podStartSLOduration=3.488603931 podStartE2EDuration="5.928138388s" podCreationTimestamp="2025-11-25 23:14:39 +0000 UTC" firstStartedPulling="2025-11-25 23:14:41.87920824 +0000 UTC m=+938.236867352" lastFinishedPulling="2025-11-25 23:14:44.318742697 +0000 UTC m=+940.676401809" observedRunningTime="2025-11-25 23:14:44.926186713 +0000 UTC m=+941.283845825" watchObservedRunningTime="2025-11-25 23:14:44.928138388 +0000 UTC m=+941.285797500" Nov 25 23:14:50 crc kubenswrapper[5045]: I1125 23:14:50.218116 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:50 crc kubenswrapper[5045]: I1125 23:14:50.221033 
5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:50 crc kubenswrapper[5045]: I1125 23:14:50.290540 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:50 crc kubenswrapper[5045]: I1125 23:14:50.976866 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:51 crc kubenswrapper[5045]: I1125 23:14:51.028605 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zpf6z"] Nov 25 23:14:52 crc kubenswrapper[5045]: I1125 23:14:52.952946 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zpf6z" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerName="registry-server" containerID="cri-o://d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64" gracePeriod=2 Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.318337 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.485584 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-utilities\") pod \"95300418-dfba-43ea-a3c3-f212d6c51e32\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.485749 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-catalog-content\") pod \"95300418-dfba-43ea-a3c3-f212d6c51e32\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.485795 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4whz\" (UniqueName: \"kubernetes.io/projected/95300418-dfba-43ea-a3c3-f212d6c51e32-kube-api-access-p4whz\") pod \"95300418-dfba-43ea-a3c3-f212d6c51e32\" (UID: \"95300418-dfba-43ea-a3c3-f212d6c51e32\") " Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.486397 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-utilities" (OuterVolumeSpecName: "utilities") pod "95300418-dfba-43ea-a3c3-f212d6c51e32" (UID: "95300418-dfba-43ea-a3c3-f212d6c51e32"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.498222 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95300418-dfba-43ea-a3c3-f212d6c51e32-kube-api-access-p4whz" (OuterVolumeSpecName: "kube-api-access-p4whz") pod "95300418-dfba-43ea-a3c3-f212d6c51e32" (UID: "95300418-dfba-43ea-a3c3-f212d6c51e32"). InnerVolumeSpecName "kube-api-access-p4whz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.547502 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "95300418-dfba-43ea-a3c3-f212d6c51e32" (UID: "95300418-dfba-43ea-a3c3-f212d6c51e32"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.587043 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.587081 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4whz\" (UniqueName: \"kubernetes.io/projected/95300418-dfba-43ea-a3c3-f212d6c51e32-kube-api-access-p4whz\") on node \"crc\" DevicePath \"\"" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.587097 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95300418-dfba-43ea-a3c3-f212d6c51e32-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.960106 5045 generic.go:334] "Generic (PLEG): container finished" podID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerID="d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64" exitCode=0 Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.960170 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zpf6z" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.960170 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zpf6z" event={"ID":"95300418-dfba-43ea-a3c3-f212d6c51e32","Type":"ContainerDied","Data":"d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64"} Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.961118 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zpf6z" event={"ID":"95300418-dfba-43ea-a3c3-f212d6c51e32","Type":"ContainerDied","Data":"8648aa6deb5aa99ca27ee5534da82180bad0fd95363447bf9124ae51a1cbf366"} Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.961138 5045 scope.go:117] "RemoveContainer" containerID="d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64" Nov 25 23:14:53 crc kubenswrapper[5045]: I1125 23:14:53.994103 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zpf6z"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.002045 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zpf6z"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.172029 5045 scope.go:117] "RemoveContainer" containerID="0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.196344 5045 scope.go:117] "RemoveContainer" containerID="b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.219925 5045 scope.go:117] "RemoveContainer" containerID="d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64" Nov 25 23:14:54 crc kubenswrapper[5045]: E1125 23:14:54.220379 5045 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64\": container with ID starting with d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64 not found: ID does not exist" containerID="d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.220417 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64"} err="failed to get container status \"d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64\": rpc error: code = NotFound desc = could not find container \"d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64\": container with ID starting with d2bfd0a32d6f6ea30e71c5f6c6de099bdd42f8ef5373cdc7dc20f29fc7ed4a64 not found: ID does not exist" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.220444 5045 scope.go:117] "RemoveContainer" containerID="0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a" Nov 25 23:14:54 crc kubenswrapper[5045]: E1125 23:14:54.220898 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a\": container with ID starting with 0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a not found: ID does not exist" containerID="0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.220928 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a"} err="failed to get container status \"0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a\": rpc error: code = NotFound desc = could not find container \"0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a\": container with ID starting with 0b40686303559d0e50c7518683ce7d3b02c4b37b3087f5eb1109e6e24fcd6c5a not found: ID does not exist" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.220944 5045 scope.go:117] "RemoveContainer" containerID="b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d" Nov 25 23:14:54 crc kubenswrapper[5045]: E1125 23:14:54.221134 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d\": container with ID starting with b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d not found: ID does not exist" containerID="b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.221154 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d"} err="failed to get container status \"b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d\": rpc error: code = NotFound desc = could not find container \"b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d\": container with ID starting with b0630498bb8f665528fc29b3f96d1e100dfa55ddb9969c5d0a2d8d228165c21d not found: ID does not exist" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.413266 5045 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" path="/var/lib/kubelet/pods/95300418-dfba-43ea-a3c3-f212d6c51e32/volumes" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.574857 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz"] Nov 25 23:14:54 crc kubenswrapper[5045]: E1125 23:14:54.575156 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerName="registry-server" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.575179 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerName="registry-server" Nov 25 23:14:54 crc kubenswrapper[5045]: E1125 23:14:54.575200 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerName="extract-content" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.575208 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerName="extract-content" Nov 25 23:14:54 crc kubenswrapper[5045]: E1125 23:14:54.575222 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerName="extract-utilities" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.575232 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerName="extract-utilities" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.575399 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="95300418-dfba-43ea-a3c3-f212d6c51e32" containerName="registry-server" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.576180 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.577547 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-flhlk" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.580196 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.582400 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.585598 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-b4gtg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.587180 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.621292 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.635568 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-npq84"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.637018 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.643403 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-pdzgn" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.649358 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.650622 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.663150 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-qjnrg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.676103 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.690785 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-npq84"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.696771 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.698177 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.708687 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktwhl\" (UniqueName: \"kubernetes.io/projected/e287d4e5-6925-42eb-a661-fded8259123f-kube-api-access-ktwhl\") pod \"barbican-operator-controller-manager-7b64f4fb85-hmwhx\" (UID: \"e287d4e5-6925-42eb-a661-fded8259123f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.709035 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gchdg\" (UniqueName: \"kubernetes.io/projected/f1030448-0fd8-42d3-9a83-2e27d87c855e-kube-api-access-gchdg\") pod \"cinder-operator-controller-manager-6b7f75547b-tkwkz\" (UID: \"f1030448-0fd8-42d3-9a83-2e27d87c855e\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.711271 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.712365 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.717055 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-9qjnt" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.717317 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-k2msk" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.721794 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.744766 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.745822 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.747499 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-c4zhs" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.748827 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.752994 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.785630 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.786817 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.791933 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-88nmb" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.798780 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.810773 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.850289 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.861816 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktwhl\" (UniqueName: \"kubernetes.io/projected/e287d4e5-6925-42eb-a661-fded8259123f-kube-api-access-ktwhl\") pod \"barbican-operator-controller-manager-7b64f4fb85-hmwhx\" (UID: \"e287d4e5-6925-42eb-a661-fded8259123f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.862030 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpr2k\" (UniqueName: \"kubernetes.io/projected/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-kube-api-access-tpr2k\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.862154 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.862269 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2wvk\" (UniqueName: \"kubernetes.io/projected/344fd1ea-983e-4515-aa8a-479ec0c46c81-kube-api-access-k2wvk\") pod \"designate-operator-controller-manager-955677c94-npq84\" (UID: \"344fd1ea-983e-4515-aa8a-479ec0c46c81\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.862393 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl5qd\" (UniqueName: \"kubernetes.io/projected/a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b-kube-api-access-tl5qd\") pod \"heat-operator-controller-manager-5b77f656f-8mpkk\" (UID: \"a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.862444 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gchdg\" (UniqueName: \"kubernetes.io/projected/f1030448-0fd8-42d3-9a83-2e27d87c855e-kube-api-access-gchdg\") pod \"cinder-operator-controller-manager-6b7f75547b-tkwkz\" (UID: \"f1030448-0fd8-42d3-9a83-2e27d87c855e\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.862608 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glj5p\" (UniqueName: \"kubernetes.io/projected/20abfff9-9e94-466e-a2bc-487a231b86a5-kube-api-access-glj5p\") pod \"horizon-operator-controller-manager-5d494799bf-qzhwg\" (UID: \"20abfff9-9e94-466e-a2bc-487a231b86a5\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.862674 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-bk45j\" (UniqueName: \"kubernetes.io/projected/39fa03d1-e77d-46bd-bc4c-d83960611145-kube-api-access-bk45j\") pod \"glance-operator-controller-manager-76f7fdd9bd-84dxm\" (UID: \"39fa03d1-e77d-46bd-bc4c-d83960611145\") " pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.864379 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-6j2nf" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.870785 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.908615 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.909526 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gchdg\" (UniqueName: \"kubernetes.io/projected/f1030448-0fd8-42d3-9a83-2e27d87c855e-kube-api-access-gchdg\") pod \"cinder-operator-controller-manager-6b7f75547b-tkwkz\" (UID: \"f1030448-0fd8-42d3-9a83-2e27d87c855e\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.916461 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.933356 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktwhl\" (UniqueName: \"kubernetes.io/projected/e287d4e5-6925-42eb-a661-fded8259123f-kube-api-access-ktwhl\") pod \"barbican-operator-controller-manager-7b64f4fb85-hmwhx\" (UID: \"e287d4e5-6925-42eb-a661-fded8259123f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.948997 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.949698 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.950647 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.956966 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-6wpqb" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.957053 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-4ndbm" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.963784 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.965025 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.966085 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk45j\" (UniqueName: \"kubernetes.io/projected/39fa03d1-e77d-46bd-bc4c-d83960611145-kube-api-access-bk45j\") pod \"glance-operator-controller-manager-76f7fdd9bd-84dxm\" (UID: \"39fa03d1-e77d-46bd-bc4c-d83960611145\") " pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.966163 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brls5\" (UniqueName: \"kubernetes.io/projected/2b6f436b-9a87-463e-a7ca-48ae08ba5f10-kube-api-access-brls5\") pod \"ironic-operator-controller-manager-67cb4dc6d4-mb2mk\" (UID: \"2b6f436b-9a87-463e-a7ca-48ae08ba5f10\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.966218 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpr2k\" (UniqueName: \"kubernetes.io/projected/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-kube-api-access-tpr2k\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.966244 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.966268 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2wvk\" (UniqueName: \"kubernetes.io/projected/344fd1ea-983e-4515-aa8a-479ec0c46c81-kube-api-access-k2wvk\") pod \"designate-operator-controller-manager-955677c94-npq84\" (UID: \"344fd1ea-983e-4515-aa8a-479ec0c46c81\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.966305 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqlzq\" (UniqueName: \"kubernetes.io/projected/4c3613c7-39a6-46b5-82da-a461d37d8965-kube-api-access-lqlzq\") pod \"keystone-operator-controller-manager-7b4567c7cf-7rzvl\" (UID: \"4c3613c7-39a6-46b5-82da-a461d37d8965\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.966330 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5qd\" (UniqueName: \"kubernetes.io/projected/a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b-kube-api-access-tl5qd\") pod \"heat-operator-controller-manager-5b77f656f-8mpkk\" (UID: \"a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.966357 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glj5p\" (UniqueName: 
\"kubernetes.io/projected/20abfff9-9e94-466e-a2bc-487a231b86a5-kube-api-access-glj5p\") pod \"horizon-operator-controller-manager-5d494799bf-qzhwg\" (UID: \"20abfff9-9e94-466e-a2bc-487a231b86a5\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" Nov 25 23:14:54 crc kubenswrapper[5045]: E1125 23:14:54.967351 5045 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 23:14:54 crc kubenswrapper[5045]: E1125 23:14:54.967406 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert podName:cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:55.467388524 +0000 UTC m=+951.825047636 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert") pod "infra-operator-controller-manager-57548d458d-dhjmg" (UID: "cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16") : secret "infra-operator-webhook-server-cert" not found Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.968771 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-szgh6" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.970795 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.977938 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.978912 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.984745 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9"] Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.991323 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-lhb4m" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.992144 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpr2k\" (UniqueName: \"kubernetes.io/projected/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-kube-api-access-tpr2k\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.996807 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk45j\" (UniqueName: \"kubernetes.io/projected/39fa03d1-e77d-46bd-bc4c-d83960611145-kube-api-access-bk45j\") pod \"glance-operator-controller-manager-76f7fdd9bd-84dxm\" (UID: \"39fa03d1-e77d-46bd-bc4c-d83960611145\") " pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" Nov 25 23:14:54 crc kubenswrapper[5045]: I1125 23:14:54.999060 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.002297 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2wvk\" (UniqueName: \"kubernetes.io/projected/344fd1ea-983e-4515-aa8a-479ec0c46c81-kube-api-access-k2wvk\") pod \"designate-operator-controller-manager-955677c94-npq84\" (UID: \"344fd1ea-983e-4515-aa8a-479ec0c46c81\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.012255 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.032005 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.033022 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.038468 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-4j568" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.038837 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl5qd\" (UniqueName: \"kubernetes.io/projected/a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b-kube-api-access-tl5qd\") pod \"heat-operator-controller-manager-5b77f656f-8mpkk\" (UID: \"a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.040978 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.051051 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.055003 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glj5p\" (UniqueName: \"kubernetes.io/projected/20abfff9-9e94-466e-a2bc-487a231b86a5-kube-api-access-glj5p\") pod \"horizon-operator-controller-manager-5d494799bf-qzhwg\" (UID: \"20abfff9-9e94-466e-a2bc-487a231b86a5\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.055477 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.056548 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.059055 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-hk9jr" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.059210 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.064906 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069059 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069103 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j8zt\" (UniqueName: \"kubernetes.io/projected/c5f48852-fbb4-429b-93c8-19121a51be4a-kube-api-access-7j8zt\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069123 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtvp9\" (UniqueName: \"kubernetes.io/projected/4b50dd58-f8a3-4ce4-b008-dd810e1a424d-kube-api-access-gtvp9\") pod \"nova-operator-controller-manager-79556f57fc-tkgqg\" (UID: \"4b50dd58-f8a3-4ce4-b008-dd810e1a424d\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069154 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8jgt\" (UniqueName: 
\"kubernetes.io/projected/485f4bbf-205f-4ea5-8009-a0cdeb204139-kube-api-access-f8jgt\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-phr8m\" (UID: \"485f4bbf-205f-4ea5-8009-a0cdeb204139\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069174 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brls5\" (UniqueName: \"kubernetes.io/projected/2b6f436b-9a87-463e-a7ca-48ae08ba5f10-kube-api-access-brls5\") pod \"ironic-operator-controller-manager-67cb4dc6d4-mb2mk\" (UID: \"2b6f436b-9a87-463e-a7ca-48ae08ba5f10\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069195 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxfwd\" (UniqueName: \"kubernetes.io/projected/bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e-kube-api-access-wxfwd\") pod \"octavia-operator-controller-manager-64cdc6ff96-g4pvl\" (UID: \"bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069216 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvg28\" (UniqueName: \"kubernetes.io/projected/38cf0eb2-2d59-418b-9e24-b04c72c58c9f-kube-api-access-xvg28\") pod \"neutron-operator-controller-manager-6fdcddb789-m98w9\" (UID: \"38cf0eb2-2d59-418b-9e24-b04c72c58c9f\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069298 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqlzq\" (UniqueName: \"kubernetes.io/projected/4c3613c7-39a6-46b5-82da-a461d37d8965-kube-api-access-lqlzq\") pod \"keystone-operator-controller-manager-7b4567c7cf-7rzvl\" (UID: \"4c3613c7-39a6-46b5-82da-a461d37d8965\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.069318 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mwnl\" (UniqueName: \"kubernetes.io/projected/289a9811-aa55-449c-aa82-a56f4b1ef53e-kube-api-access-9mwnl\") pod \"manila-operator-controller-manager-5d499bf58b-q7rbk\" (UID: \"289a9811-aa55-449c-aa82-a56f4b1ef53e\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.085303 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.098794 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.102880 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.103382 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.103796 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.104068 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqlzq\" (UniqueName: \"kubernetes.io/projected/4c3613c7-39a6-46b5-82da-a461d37d8965-kube-api-access-lqlzq\") pod \"keystone-operator-controller-manager-7b4567c7cf-7rzvl\" (UID: \"4c3613c7-39a6-46b5-82da-a461d37d8965\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.107457 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-gmlb7" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.107647 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-829n2" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.112203 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.113400 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.119520 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.122981 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-sft47" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.127614 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brls5\" (UniqueName: \"kubernetes.io/projected/2b6f436b-9a87-463e-a7ca-48ae08ba5f10-kube-api-access-brls5\") pod \"ironic-operator-controller-manager-67cb4dc6d4-mb2mk\" (UID: \"2b6f436b-9a87-463e-a7ca-48ae08ba5f10\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.127682 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.135271 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.147406 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.160477 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.161555 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.163962 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2rpnf" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.171878 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8jgt\" (UniqueName: \"kubernetes.io/projected/485f4bbf-205f-4ea5-8009-a0cdeb204139-kube-api-access-f8jgt\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-phr8m\" (UID: \"485f4bbf-205f-4ea5-8009-a0cdeb204139\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.171919 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxfwd\" (UniqueName: \"kubernetes.io/projected/bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e-kube-api-access-wxfwd\") pod \"octavia-operator-controller-manager-64cdc6ff96-g4pvl\" (UID: \"bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.171941 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvg28\" (UniqueName: \"kubernetes.io/projected/38cf0eb2-2d59-418b-9e24-b04c72c58c9f-kube-api-access-xvg28\") pod \"neutron-operator-controller-manager-6fdcddb789-m98w9\" (UID: \"38cf0eb2-2d59-418b-9e24-b04c72c58c9f\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.171990 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdkvn\" (UniqueName: \"kubernetes.io/projected/9092d9d6-3e10-4f43-84e7-121153c39104-kube-api-access-tdkvn\") pod \"swift-operator-controller-manager-d77b94747-n7s8v\" (UID: \"9092d9d6-3e10-4f43-84e7-121153c39104\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.172025 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mwnl\" (UniqueName: \"kubernetes.io/projected/289a9811-aa55-449c-aa82-a56f4b1ef53e-kube-api-access-9mwnl\") pod \"manila-operator-controller-manager-5d499bf58b-q7rbk\" (UID: \"289a9811-aa55-449c-aa82-a56f4b1ef53e\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.172095 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlsk6\" (UniqueName: \"kubernetes.io/projected/35229e25-460b-4c57-9dae-6dceadf19b3f-kube-api-access-wlsk6\") pod \"ovn-operator-controller-manager-56897c768d-rk9dz\" (UID: \"35229e25-460b-4c57-9dae-6dceadf19b3f\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.172137 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 
23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.172156 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j8zt\" (UniqueName: \"kubernetes.io/projected/c5f48852-fbb4-429b-93c8-19121a51be4a-kube-api-access-7j8zt\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.172180 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtvp9\" (UniqueName: \"kubernetes.io/projected/4b50dd58-f8a3-4ce4-b008-dd810e1a424d-kube-api-access-gtvp9\") pod \"nova-operator-controller-manager-79556f57fc-tkgqg\" (UID: \"4b50dd58-f8a3-4ce4-b008-dd810e1a424d\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.172199 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wg2l\" (UniqueName: \"kubernetes.io/projected/da63a4ac-64af-4d60-b968-274c9960b665-kube-api-access-4wg2l\") pod \"placement-operator-controller-manager-57988cc5b5-2pfmh\" (UID: \"da63a4ac-64af-4d60-b968-274c9960b665\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.172215 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zckfv\" (UniqueName: \"kubernetes.io/projected/39ce2a8b-211e-4bb4-91a3-0999e4f45162-kube-api-access-zckfv\") pod \"telemetry-operator-controller-manager-76cc84c6bb-jzkxl\" (UID: \"39ce2a8b-211e-4bb4-91a3-0999e4f45162\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.172913 5045 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.172959 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert podName:c5f48852-fbb4-429b-93c8-19121a51be4a nodeName:}" failed. No retries permitted until 2025-11-25 23:14:55.672944113 +0000 UTC m=+952.030603225 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" (UID: "c5f48852-fbb4-429b-93c8-19121a51be4a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.181527 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.201415 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.206274 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtvp9\" (UniqueName: \"kubernetes.io/projected/4b50dd58-f8a3-4ce4-b008-dd810e1a424d-kube-api-access-gtvp9\") pod \"nova-operator-controller-manager-79556f57fc-tkgqg\" (UID: \"4b50dd58-f8a3-4ce4-b008-dd810e1a424d\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.208645 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvg28\" (UniqueName: \"kubernetes.io/projected/38cf0eb2-2d59-418b-9e24-b04c72c58c9f-kube-api-access-xvg28\") pod \"neutron-operator-controller-manager-6fdcddb789-m98w9\" (UID: \"38cf0eb2-2d59-418b-9e24-b04c72c58c9f\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.211481 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8jgt\" (UniqueName: \"kubernetes.io/projected/485f4bbf-205f-4ea5-8009-a0cdeb204139-kube-api-access-f8jgt\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-phr8m\" (UID: \"485f4bbf-205f-4ea5-8009-a0cdeb204139\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.217459 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.226552 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mwnl\" (UniqueName: \"kubernetes.io/projected/289a9811-aa55-449c-aa82-a56f4b1ef53e-kube-api-access-9mwnl\") pod \"manila-operator-controller-manager-5d499bf58b-q7rbk\" (UID: \"289a9811-aa55-449c-aa82-a56f4b1ef53e\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.230211 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.232002 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.234321 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-z6lq2" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.238157 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j8zt\" (UniqueName: \"kubernetes.io/projected/c5f48852-fbb4-429b-93c8-19121a51be4a-kube-api-access-7j8zt\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.245806 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxfwd\" (UniqueName: \"kubernetes.io/projected/bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e-kube-api-access-wxfwd\") pod \"octavia-operator-controller-manager-64cdc6ff96-g4pvl\" (UID: \"bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.266366 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.267183 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.268329 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.274624 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdkvn\" (UniqueName: \"kubernetes.io/projected/9092d9d6-3e10-4f43-84e7-121153c39104-kube-api-access-tdkvn\") pod \"swift-operator-controller-manager-d77b94747-n7s8v\" (UID: \"9092d9d6-3e10-4f43-84e7-121153c39104\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.274745 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlsk6\" (UniqueName: \"kubernetes.io/projected/35229e25-460b-4c57-9dae-6dceadf19b3f-kube-api-access-wlsk6\") pod \"ovn-operator-controller-manager-56897c768d-rk9dz\" (UID: \"35229e25-460b-4c57-9dae-6dceadf19b3f\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.274785 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wg2l\" (UniqueName: \"kubernetes.io/projected/da63a4ac-64af-4d60-b968-274c9960b665-kube-api-access-4wg2l\") pod \"placement-operator-controller-manager-57988cc5b5-2pfmh\" (UID: \"da63a4ac-64af-4d60-b968-274c9960b665\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.274835 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zckfv\" (UniqueName: \"kubernetes.io/projected/39ce2a8b-211e-4bb4-91a3-0999e4f45162-kube-api-access-zckfv\") pod 
\"telemetry-operator-controller-manager-76cc84c6bb-jzkxl\" (UID: \"39ce2a8b-211e-4bb4-91a3-0999e4f45162\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.306462 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.314728 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wg2l\" (UniqueName: \"kubernetes.io/projected/da63a4ac-64af-4d60-b968-274c9960b665-kube-api-access-4wg2l\") pod \"placement-operator-controller-manager-57988cc5b5-2pfmh\" (UID: \"da63a4ac-64af-4d60-b968-274c9960b665\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.319437 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlsk6\" (UniqueName: \"kubernetes.io/projected/35229e25-460b-4c57-9dae-6dceadf19b3f-kube-api-access-wlsk6\") pod \"ovn-operator-controller-manager-56897c768d-rk9dz\" (UID: \"35229e25-460b-4c57-9dae-6dceadf19b3f\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.330032 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zckfv\" (UniqueName: \"kubernetes.io/projected/39ce2a8b-211e-4bb4-91a3-0999e4f45162-kube-api-access-zckfv\") pod \"telemetry-operator-controller-manager-76cc84c6bb-jzkxl\" (UID: \"39ce2a8b-211e-4bb4-91a3-0999e4f45162\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.340848 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.349179 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.356136 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.361641 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdkvn\" (UniqueName: \"kubernetes.io/projected/9092d9d6-3e10-4f43-84e7-121153c39104-kube-api-access-tdkvn\") pod \"swift-operator-controller-manager-d77b94747-n7s8v\" (UID: \"9092d9d6-3e10-4f43-84e7-121153c39104\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.363948 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.364967 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.370170 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-w626l" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.375604 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwlcs\" (UniqueName: \"kubernetes.io/projected/74cee554-ae39-4dd6-b932-dc432e32cda0-kube-api-access-cwlcs\") pod \"test-operator-controller-manager-5cd6c7f4c8-vsfg5\" (UID: \"74cee554-ae39-4dd6-b932-dc432e32cda0\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.382856 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.401304 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.403648 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.413649 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.417821 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.419488 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.423115 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.423478 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.423572 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-rv2z9" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.429145 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.439285 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.453071 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.471125 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.472108 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.476581 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.476620 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgvx9\" (UniqueName: \"kubernetes.io/projected/eb320624-b1a8-45b6-891f-0b4517a5376e-kube-api-access-kgvx9\") pod \"watcher-operator-controller-manager-656dcb59d4-r4wcc\" (UID: \"eb320624-b1a8-45b6-891f-0b4517a5376e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.476662 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwlcs\" (UniqueName: \"kubernetes.io/projected/74cee554-ae39-4dd6-b932-dc432e32cda0-kube-api-access-cwlcs\") pod \"test-operator-controller-manager-5cd6c7f4c8-vsfg5\" (UID: \"74cee554-ae39-4dd6-b932-dc432e32cda0\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.477158 5045 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.477215 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert podName:cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:56.477200411 +0000 UTC m=+952.834859523 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert") pod "infra-operator-controller-manager-57548d458d-dhjmg" (UID: "cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16") : secret "infra-operator-webhook-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.478251 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-zxqbk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.478546 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.517888 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwlcs\" (UniqueName: \"kubernetes.io/projected/74cee554-ae39-4dd6-b932-dc432e32cda0-kube-api-access-cwlcs\") pod \"test-operator-controller-manager-5cd6c7f4c8-vsfg5\" (UID: \"74cee554-ae39-4dd6-b932-dc432e32cda0\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.536181 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.577684 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnbvt\" (UniqueName: \"kubernetes.io/projected/6c15a559-c39e-47a5-83b2-74a6e830c1b2-kube-api-access-tnbvt\") pod \"rabbitmq-cluster-operator-manager-668c99d594-85bpk\" (UID: \"6c15a559-c39e-47a5-83b2-74a6e830c1b2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.577767 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.577831 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n277\" (UniqueName: \"kubernetes.io/projected/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-kube-api-access-2n277\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.577913 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgvx9\" (UniqueName: \"kubernetes.io/projected/eb320624-b1a8-45b6-891f-0b4517a5376e-kube-api-access-kgvx9\") pod \"watcher-operator-controller-manager-656dcb59d4-r4wcc\" (UID: \"eb320624-b1a8-45b6-891f-0b4517a5376e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.578411 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.596821 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgvx9\" (UniqueName: \"kubernetes.io/projected/eb320624-b1a8-45b6-891f-0b4517a5376e-kube-api-access-kgvx9\") pod \"watcher-operator-controller-manager-656dcb59d4-r4wcc\" (UID: \"eb320624-b1a8-45b6-891f-0b4517a5376e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.680211 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.680290 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.680314 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnbvt\" (UniqueName: \"kubernetes.io/projected/6c15a559-c39e-47a5-83b2-74a6e830c1b2-kube-api-access-tnbvt\") pod \"rabbitmq-cluster-operator-manager-668c99d594-85bpk\" (UID: \"6c15a559-c39e-47a5-83b2-74a6e830c1b2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.680345 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.680385 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n277\" (UniqueName: \"kubernetes.io/projected/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-kube-api-access-2n277\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.680807 5045 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.680850 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:56.180834715 +0000 UTC m=+952.538493827 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "webhook-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.681000 5045 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.681022 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:56.18101485 +0000 UTC m=+952.538673962 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "metrics-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.681067 5045 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: E1125 23:14:55.681088 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert podName:c5f48852-fbb4-429b-93c8-19121a51be4a nodeName:}" failed. No retries permitted until 2025-11-25 23:14:56.681082132 +0000 UTC m=+953.038741244 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" (UID: "c5f48852-fbb4-429b-93c8-19121a51be4a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.701930 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n277\" (UniqueName: \"kubernetes.io/projected/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-kube-api-access-2n277\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.709871 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnbvt\" (UniqueName: \"kubernetes.io/projected/6c15a559-c39e-47a5-83b2-74a6e830c1b2-kube-api-access-tnbvt\") pod \"rabbitmq-cluster-operator-manager-668c99d594-85bpk\" (UID: \"6c15a559-c39e-47a5-83b2-74a6e830c1b2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.735037 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.746909 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk"] Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.746980 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" Nov 25 23:14:55 crc kubenswrapper[5045]: W1125 23:14:55.767424 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1596ec7_ab5c_4e5e_ba3a_11772bdaa64b.slice/crio-d860ac9a2c649c3f2aec46e7bd8477b54d051ac9df279fab8e66862389dd843c WatchSource:0}: Error finding container d860ac9a2c649c3f2aec46e7bd8477b54d051ac9df279fab8e66862389dd843c: Status 404 returned error can't find the container with id d860ac9a2c649c3f2aec46e7bd8477b54d051ac9df279fab8e66862389dd843c Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.824803 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" Nov 25 23:14:55 crc kubenswrapper[5045]: I1125 23:14:55.981128 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.000978 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" event={"ID":"2b6f436b-9a87-463e-a7ca-48ae08ba5f10","Type":"ContainerStarted","Data":"02b8634969e41fbf0aba9e29ee528a7dad460dc374455691a19b37985a7c8615"} Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.013912 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.014501 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" event={"ID":"a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b","Type":"ContainerStarted","Data":"d860ac9a2c649c3f2aec46e7bd8477b54d051ac9df279fab8e66862389dd843c"} Nov 25 23:14:56 crc kubenswrapper[5045]: W1125 23:14:56.020958 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20abfff9_9e94_466e_a2bc_487a231b86a5.slice/crio-76fd2a6f74f741b1b413fd20579c3029b164fbfafdc5080edbdacc9dbb8a558c WatchSource:0}: Error finding container 76fd2a6f74f741b1b413fd20579c3029b164fbfafdc5080edbdacc9dbb8a558c: Status 404 returned error can't find the container with id 76fd2a6f74f741b1b413fd20579c3029b164fbfafdc5080edbdacc9dbb8a558c Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.063833 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m"] Nov 25 23:14:56 crc kubenswrapper[5045]: W1125 23:14:56.075153 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda63a4ac_64af_4d60_b968_274c9960b665.slice/crio-8863d6b35729b1dda5e53a0810708c2a376febd44cd4c8f7f24f2362548450e7 WatchSource:0}: Error finding container 8863d6b35729b1dda5e53a0810708c2a376febd44cd4c8f7f24f2362548450e7: Status 404 returned error can't find the container with id 8863d6b35729b1dda5e53a0810708c2a376febd44cd4c8f7f24f2362548450e7 Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.075194 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx"] Nov 25 23:14:56 crc kubenswrapper[5045]: W1125 23:14:56.087772 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39fa03d1_e77d_46bd_bc4c_d83960611145.slice/crio-1f3401e264689b26276798c219a33e58b41a614623eca13cb82341af24fb1f77 WatchSource:0}: Error finding container 1f3401e264689b26276798c219a33e58b41a614623eca13cb82341af24fb1f77: Status 404 returned error can't find the container with id 1f3401e264689b26276798c219a33e58b41a614623eca13cb82341af24fb1f77 Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.090456 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh"] Nov 25 23:14:56 crc kubenswrapper[5045]: W1125 23:14:56.092953 5045 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1030448_0fd8_42d3_9a83_2e27d87c855e.slice/crio-d2020e48b0f3967c908719ad2341e5aae9dbff6190b609a1d166f31ce7d11ae6 WatchSource:0}: Error finding container d2020e48b0f3967c908719ad2341e5aae9dbff6190b609a1d166f31ce7d11ae6: Status 404 returned error can't find the container with id d2020e48b0f3967c908719ad2341e5aae9dbff6190b609a1d166f31ce7d11ae6 Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.096659 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz"] Nov 25 23:14:56 crc kubenswrapper[5045]: W1125 23:14:56.097844 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod344fd1ea_983e_4515_aa8a_479ec0c46c81.slice/crio-bd68d9cfb78d3c19952ccd883c8e03cc0dc84ac2f4950dc67618f23e261cdffd WatchSource:0}: Error finding container bd68d9cfb78d3c19952ccd883c8e03cc0dc84ac2f4950dc67618f23e261cdffd: Status 404 returned error can't find the container with id bd68d9cfb78d3c19952ccd883c8e03cc0dc84ac2f4950dc67618f23e261cdffd Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.101801 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.105948 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-npq84"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.187504 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.187628 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.187770 5045 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.187838 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:57.187817042 +0000 UTC m=+953.545476154 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "webhook-server-cert" not found Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.188248 5045 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.188289 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:57.188281355 +0000 UTC m=+953.545940467 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "metrics-server-cert" not found Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.290832 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl"] Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.319071 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wlsk6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-56897c768d-rk9dz_openstack-operators(35229e25-460b-4c57-9dae-6dceadf19b3f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.319547 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl"] Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.328283 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wlsk6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-56897c768d-rk9dz_openstack-operators(35229e25-460b-4c57-9dae-6dceadf19b3f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.329445 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" podUID="35229e25-460b-4c57-9dae-6dceadf19b3f" Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.333884 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v"] Nov 25 23:14:56 crc kubenswrapper[5045]: 
E1125 23:14:56.338269 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9mwnl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-q7rbk_openstack-operators(289a9811-aa55-449c-aa82-a56f4b1ef53e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.342892 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9mwnl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-q7rbk_openstack-operators(289a9811-aa55-449c-aa82-a56f4b1ef53e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.344120 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" podUID="289a9811-aa55-449c-aa82-a56f4b1ef53e" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.344376 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xvg28,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-6fdcddb789-m98w9_openstack-operators(38cf0eb2-2d59-418b-9e24-b04c72c58c9f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.344594 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zckfv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-jzkxl_openstack-operators(39ce2a8b-211e-4bb4-91a3-0999e4f45162): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc 
kubenswrapper[5045]: E1125 23:14:56.345193 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gtvp9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-tkgqg_openstack-operators(4b50dd58-f8a3-4ce4-b008-dd810e1a424d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.347507 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xvg28,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-6fdcddb789-m98w9_openstack-operators(38cf0eb2-2d59-418b-9e24-b04c72c58c9f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.347921 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gtvp9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-tkgqg_openstack-operators(4b50dd58-f8a3-4ce4-b008-dd810e1a424d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.348435 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zckfv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-jzkxl_openstack-operators(39ce2a8b-211e-4bb4-91a3-0999e4f45162): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.349207 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" podUID="38cf0eb2-2d59-418b-9e24-b04c72c58c9f" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.350112 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" podUID="39ce2a8b-211e-4bb4-91a3-0999e4f45162" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.350182 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" podUID="4b50dd58-f8a3-4ce4-b008-dd810e1a424d" Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.357497 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.370886 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.379151 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.384865 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.393743 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.460216 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc"] Nov 25 23:14:56 crc 
kubenswrapper[5045]: E1125 23:14:56.464897 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tnbvt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-85bpk_openstack-operators(6c15a559-c39e-47a5-83b2-74a6e830c1b2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.466442 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" podUID="6c15a559-c39e-47a5-83b2-74a6e830c1b2" Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.472676 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk"] Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.484700 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5"] Nov 25 23:14:56 crc kubenswrapper[5045]: W1125 23:14:56.485812 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74cee554_ae39_4dd6_b932_dc432e32cda0.slice/crio-3cf8a571a8e9c3e6819e7f01501bb97933ac2b47dd4d035e7553a494182c1faa WatchSource:0}: Error finding container 3cf8a571a8e9c3e6819e7f01501bb97933ac2b47dd4d035e7553a494182c1faa: Status 404 returned error can't find the container with id 3cf8a571a8e9c3e6819e7f01501bb97933ac2b47dd4d035e7553a494182c1faa Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.487801 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cwlcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-vsfg5_openstack-operators(74cee554-ae39-4dd6-b932-dc432e32cda0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.491777 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cwlcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-vsfg5_openstack-operators(74cee554-ae39-4dd6-b932-dc432e32cda0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.493271 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.493651 5045 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.493696 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert podName:cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:58.493680324 +0000 UTC m=+954.851339456 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert") pod "infra-operator-controller-manager-57548d458d-dhjmg" (UID: "cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16") : secret "infra-operator-webhook-server-cert" not found Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.493770 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" podUID="74cee554-ae39-4dd6-b932-dc432e32cda0" Nov 25 23:14:56 crc kubenswrapper[5045]: I1125 23:14:56.696517 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.696678 5045 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:14:56 crc kubenswrapper[5045]: E1125 23:14:56.696763 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert podName:c5f48852-fbb4-429b-93c8-19121a51be4a nodeName:}" failed. No retries permitted until 2025-11-25 23:14:58.696747063 +0000 UTC m=+955.054406175 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" (UID: "c5f48852-fbb4-429b-93c8-19121a51be4a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.024349 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" event={"ID":"20abfff9-9e94-466e-a2bc-487a231b86a5","Type":"ContainerStarted","Data":"76fd2a6f74f741b1b413fd20579c3029b164fbfafdc5080edbdacc9dbb8a558c"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.025677 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" event={"ID":"f1030448-0fd8-42d3-9a83-2e27d87c855e","Type":"ContainerStarted","Data":"d2020e48b0f3967c908719ad2341e5aae9dbff6190b609a1d166f31ce7d11ae6"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.027305 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" event={"ID":"9092d9d6-3e10-4f43-84e7-121153c39104","Type":"ContainerStarted","Data":"69ae3b03c2fbd6164c33965587eb2af5f62ead34ec218c5fe176ee54cc0236d9"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.029961 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" event={"ID":"289a9811-aa55-449c-aa82-a56f4b1ef53e","Type":"ContainerStarted","Data":"f4b5fe7d1d8705c3b396f36279779b37f4fe92159874c1b5a384832f3fcf7e86"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.031686 5045 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" event={"ID":"344fd1ea-983e-4515-aa8a-479ec0c46c81","Type":"ContainerStarted","Data":"bd68d9cfb78d3c19952ccd883c8e03cc0dc84ac2f4950dc67618f23e261cdffd"} Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.032705 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" podUID="289a9811-aa55-449c-aa82-a56f4b1ef53e" Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.034164 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" event={"ID":"39ce2a8b-211e-4bb4-91a3-0999e4f45162","Type":"ContainerStarted","Data":"880ca51506ef50886f08d9a8065230adb5b42621a60b3ac3591ed418e80b2f8e"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.035325 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" event={"ID":"da63a4ac-64af-4d60-b968-274c9960b665","Type":"ContainerStarted","Data":"8863d6b35729b1dda5e53a0810708c2a376febd44cd4c8f7f24f2362548450e7"} Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.036802 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" podUID="39ce2a8b-211e-4bb4-91a3-0999e4f45162" Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.038115 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" event={"ID":"4b50dd58-f8a3-4ce4-b008-dd810e1a424d","Type":"ContainerStarted","Data":"7fb2488455e4e99bc8089674c8c3978343d4c25abe4fe9def81e042c472eafc3"} Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.040404 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" podUID="4b50dd58-f8a3-4ce4-b008-dd810e1a424d" Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.040501 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" 
event={"ID":"6c15a559-c39e-47a5-83b2-74a6e830c1b2","Type":"ContainerStarted","Data":"7b3544ef8a49c06f4d128e6cc174d61ceaf552b0e7639a0e5afd7457226fa5d1"} Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.041477 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" podUID="6c15a559-c39e-47a5-83b2-74a6e830c1b2" Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.041970 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" event={"ID":"bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e","Type":"ContainerStarted","Data":"4d14616e85f1e30f41ab4b2039b95c5b8afd3c85467a4d2232278d94ba191874"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.046180 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" event={"ID":"38cf0eb2-2d59-418b-9e24-b04c72c58c9f","Type":"ContainerStarted","Data":"02a15e323f338c5da3e0e0500f8869808123205338df4a2672b17d9af7bfae8c"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.048645 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" event={"ID":"74cee554-ae39-4dd6-b932-dc432e32cda0","Type":"ContainerStarted","Data":"3cf8a571a8e9c3e6819e7f01501bb97933ac2b47dd4d035e7553a494182c1faa"} Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.051347 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" podUID="38cf0eb2-2d59-418b-9e24-b04c72c58c9f" Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.051528 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" podUID="74cee554-ae39-4dd6-b932-dc432e32cda0" Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.051640 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" event={"ID":"e287d4e5-6925-42eb-a661-fded8259123f","Type":"ContainerStarted","Data":"4d9ab9db69016da20ae2af98d875da5ca7e38e813815af54c51dceb28c935b59"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.055216 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" 
event={"ID":"39fa03d1-e77d-46bd-bc4c-d83960611145","Type":"ContainerStarted","Data":"1f3401e264689b26276798c219a33e58b41a614623eca13cb82341af24fb1f77"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.057595 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" event={"ID":"eb320624-b1a8-45b6-891f-0b4517a5376e","Type":"ContainerStarted","Data":"e6195bcbdfd644dd3e5ff367e7d529356be02b177b329e7da97bb96a49167088"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.060688 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" event={"ID":"485f4bbf-205f-4ea5-8009-a0cdeb204139","Type":"ContainerStarted","Data":"b206e7254332b7972f904a5db640d11a5c22bc2ea9accf6e12a0b52c8219b870"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.061621 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" event={"ID":"35229e25-460b-4c57-9dae-6dceadf19b3f","Type":"ContainerStarted","Data":"031fd478fc6c3c6c6063993cd654c70f055d61359d3633979f211bd7ab3131ae"} Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.066507 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" podUID="35229e25-460b-4c57-9dae-6dceadf19b3f" Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.067913 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" event={"ID":"4c3613c7-39a6-46b5-82da-a461d37d8965","Type":"ContainerStarted","Data":"9d0f75f960a0e11bbee4c5e1e56ac4212dc860eda3fd2cc1a73837773265a93d"} Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.203510 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.203636 5045 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.203701 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:59.203683868 +0000 UTC m=+955.561342970 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "metrics-server-cert" not found Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.203749 5045 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 23:14:57 crc kubenswrapper[5045]: E1125 23:14:57.203989 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:14:59.203971947 +0000 UTC m=+955.561631059 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "webhook-server-cert" not found Nov 25 23:14:57 crc kubenswrapper[5045]: I1125 23:14:57.203644 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.084506 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" podUID="6c15a559-c39e-47a5-83b2-74a6e830c1b2" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.084917 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" podUID="38cf0eb2-2d59-418b-9e24-b04c72c58c9f" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.084968 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" podUID="74cee554-ae39-4dd6-b932-dc432e32cda0" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.085017 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" podUID="289a9811-aa55-449c-aa82-a56f4b1ef53e" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.098846 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" podUID="4b50dd58-f8a3-4ce4-b008-dd810e1a424d" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.098929 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" podUID="35229e25-460b-4c57-9dae-6dceadf19b3f" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.098993 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" podUID="39ce2a8b-211e-4bb4-91a3-0999e4f45162" Nov 25 23:14:58 crc kubenswrapper[5045]: I1125 23:14:58.537877 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.538032 5045 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.538075 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert podName:cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16 nodeName:}" failed. No retries permitted until 2025-11-25 23:15:02.538062127 +0000 UTC m=+958.895721239 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert") pod "infra-operator-controller-manager-57548d458d-dhjmg" (UID: "cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16") : secret "infra-operator-webhook-server-cert" not found Nov 25 23:14:58 crc kubenswrapper[5045]: I1125 23:14:58.741506 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.741738 5045 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:14:58 crc kubenswrapper[5045]: E1125 23:14:58.741818 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert podName:c5f48852-fbb4-429b-93c8-19121a51be4a nodeName:}" failed. No retries permitted until 2025-11-25 23:15:02.741791934 +0000 UTC m=+959.099451046 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" (UID: "c5f48852-fbb4-429b-93c8-19121a51be4a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:14:59 crc kubenswrapper[5045]: I1125 23:14:59.251256 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:59 crc kubenswrapper[5045]: I1125 23:14:59.251386 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:14:59 crc kubenswrapper[5045]: E1125 23:14:59.251574 5045 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 23:14:59 crc kubenswrapper[5045]: E1125 23:14:59.251632 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:15:03.251612691 +0000 UTC m=+959.609271803 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "webhook-server-cert" not found Nov 25 23:14:59 crc kubenswrapper[5045]: E1125 23:14:59.252030 5045 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 23:14:59 crc kubenswrapper[5045]: E1125 23:14:59.252065 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:15:03.252055253 +0000 UTC m=+959.609714365 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "metrics-server-cert" not found Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.144460 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv"] Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.145793 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.158859 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.160314 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.162316 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv"] Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.266468 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-secret-volume\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.266528 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt8mh\" (UniqueName: \"kubernetes.io/projected/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-kube-api-access-pt8mh\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.266566 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-config-volume\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.367817 5045 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-secret-volume\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.367893 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt8mh\" (UniqueName: \"kubernetes.io/projected/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-kube-api-access-pt8mh\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.367932 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-config-volume\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.368929 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-config-volume\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.374727 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-secret-volume\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.382672 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt8mh\" (UniqueName: \"kubernetes.io/projected/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-kube-api-access-pt8mh\") pod \"collect-profiles-29401875-g5mrv\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.479585 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.540583 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.540642 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.540690 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.541347 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ac9b25e6d63635c2d6571dedc7552214abf9eda1f3832a75a6240c6a0f672f51"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:15:00 crc kubenswrapper[5045]: I1125 23:15:00.541414 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://ac9b25e6d63635c2d6571dedc7552214abf9eda1f3832a75a6240c6a0f672f51" gracePeriod=600 Nov 25 23:15:01 crc kubenswrapper[5045]: I1125 23:15:01.118013 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="ac9b25e6d63635c2d6571dedc7552214abf9eda1f3832a75a6240c6a0f672f51" exitCode=0 Nov 25 23:15:01 crc kubenswrapper[5045]: I1125 23:15:01.118061 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"ac9b25e6d63635c2d6571dedc7552214abf9eda1f3832a75a6240c6a0f672f51"} Nov 25 23:15:01 crc kubenswrapper[5045]: I1125 23:15:01.118137 5045 scope.go:117] "RemoveContainer" containerID="b92b5d9b7e6348e5c2d32780968d464c8df43db35693b94adce9b088fa04e458" Nov 25 23:15:02 crc kubenswrapper[5045]: I1125 23:15:02.603692 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:15:02 crc kubenswrapper[5045]: E1125 23:15:02.603894 5045 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 23:15:02 crc kubenswrapper[5045]: E1125 23:15:02.603963 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert podName:cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16 nodeName:}" failed. 
No retries permitted until 2025-11-25 23:15:10.603944473 +0000 UTC m=+966.961603585 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert") pod "infra-operator-controller-manager-57548d458d-dhjmg" (UID: "cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16") : secret "infra-operator-webhook-server-cert" not found Nov 25 23:15:02 crc kubenswrapper[5045]: I1125 23:15:02.808786 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:15:02 crc kubenswrapper[5045]: E1125 23:15:02.809048 5045 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:15:02 crc kubenswrapper[5045]: E1125 23:15:02.809177 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert podName:c5f48852-fbb4-429b-93c8-19121a51be4a nodeName:}" failed. No retries permitted until 2025-11-25 23:15:10.809126021 +0000 UTC m=+967.166785173 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" (UID: "c5f48852-fbb4-429b-93c8-19121a51be4a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 23:15:03 crc kubenswrapper[5045]: I1125 23:15:03.315970 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:03 crc kubenswrapper[5045]: I1125 23:15:03.316111 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:03 crc kubenswrapper[5045]: E1125 23:15:03.316147 5045 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 23:15:03 crc kubenswrapper[5045]: E1125 23:15:03.316222 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:15:11.316205061 +0000 UTC m=+967.673864173 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "metrics-server-cert" not found Nov 25 23:15:03 crc kubenswrapper[5045]: E1125 23:15:03.316249 5045 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 23:15:03 crc kubenswrapper[5045]: E1125 23:15:03.316313 5045 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs podName:aeeca19c-1da3-4bc7-934d-fa4c8663ca04 nodeName:}" failed. No retries permitted until 2025-11-25 23:15:11.316296334 +0000 UTC m=+967.673955456 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs") pod "openstack-operator-controller-manager-746744c96-rtr9q" (UID: "aeeca19c-1da3-4bc7-934d-fa4c8663ca04") : secret "webhook-server-cert" not found Nov 25 23:15:09 crc kubenswrapper[5045]: I1125 23:15:09.178057 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" event={"ID":"eb320624-b1a8-45b6-891f-0b4517a5376e","Type":"ContainerStarted","Data":"41f7c8e103a0487b641feb70b8821b40be390a4d59ac936ad1d41a4f6cb0e6d6"} Nov 25 23:15:09 crc kubenswrapper[5045]: I1125 23:15:09.184316 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" event={"ID":"9092d9d6-3e10-4f43-84e7-121153c39104","Type":"ContainerStarted","Data":"e8eb546d8d1a1861082898613f9e1ade2da2427dfe6e41bb1a673a1593e12a99"} Nov 25 23:15:09 crc kubenswrapper[5045]: I1125 23:15:09.189634 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" event={"ID":"344fd1ea-983e-4515-aa8a-479ec0c46c81","Type":"ContainerStarted","Data":"efdfdc8e9405be85257d1e3a6db2c47d0b8e78b5f1c6d91f95e7299e55fd6e7f"} Nov 25 23:15:09 crc kubenswrapper[5045]: I1125 23:15:09.197054 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"ad71cc49c7fac24fcefe379082dd68e60c6857b81de28d84310c7d3a35f4b46a"} Nov 25 23:15:09 crc kubenswrapper[5045]: I1125 23:15:09.201926 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" event={"ID":"20abfff9-9e94-466e-a2bc-487a231b86a5","Type":"ContainerStarted","Data":"952b2ae3469dc702b0f27857f34a18f1525e1f5609b87545a9497afae2df4175"} Nov 25 23:15:09 crc kubenswrapper[5045]: I1125 23:15:09.299935 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv"] Nov 25 23:15:09 crc kubenswrapper[5045]: E1125 23:15:09.456912 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lqlzq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-7rzvl_openstack-operators(4c3613c7-39a6-46b5-82da-a461d37d8965): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:15:09 crc kubenswrapper[5045]: E1125 23:15:09.458012 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" podUID="4c3613c7-39a6-46b5-82da-a461d37d8965" Nov 25 23:15:09 crc kubenswrapper[5045]: E1125 23:15:09.499263 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gchdg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6b7f75547b-tkwkz_openstack-operators(f1030448-0fd8-42d3-9a83-2e27d87c855e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 23:15:09 crc kubenswrapper[5045]: E1125 23:15:09.501588 5045 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" podUID="f1030448-0fd8-42d3-9a83-2e27d87c855e" Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.229254 5045 generic.go:334] "Generic (PLEG): container finished" podID="a274ea0e-8ce3-46aa-84e7-21bf65cd00ae" containerID="3472988225de79e28a81de5061a3c36f8b924634fb3d4afa1ab133e1753c8c5d" exitCode=0 Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.229581 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" event={"ID":"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae","Type":"ContainerDied","Data":"3472988225de79e28a81de5061a3c36f8b924634fb3d4afa1ab133e1753c8c5d"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.229608 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" event={"ID":"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae","Type":"ContainerStarted","Data":"88d319af80180aa713fb819177a11ae301ff26a88fa30120eec02f774f1623ef"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.231826 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" event={"ID":"f1030448-0fd8-42d3-9a83-2e27d87c855e","Type":"ContainerStarted","Data":"5c7c05da8ceba212b94dba4a7731dea3579e01e073ff66013f33dbcc5ee237e9"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.232630 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" Nov 25 23:15:10 crc kubenswrapper[5045]: E1125 23:15:10.233628 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" podUID="f1030448-0fd8-42d3-9a83-2e27d87c855e" Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.254987 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" event={"ID":"4c3613c7-39a6-46b5-82da-a461d37d8965","Type":"ContainerStarted","Data":"713231f63f36cc16a7d36023113ca8ff7b7a060b7fdf8764812ca6021af4e616"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.255066 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" Nov 25 23:15:10 crc kubenswrapper[5045]: E1125 23:15:10.258487 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" podUID="4c3613c7-39a6-46b5-82da-a461d37d8965" Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.260014 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" event={"ID":"2b6f436b-9a87-463e-a7ca-48ae08ba5f10","Type":"ContainerStarted","Data":"9c9d3b03ef82edb43132b332e948a8a1f4f0e37568df28fbe185d1fa500a5ad4"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.298102 
5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" event={"ID":"485f4bbf-205f-4ea5-8009-a0cdeb204139","Type":"ContainerStarted","Data":"0cdbc1c778d71022fb7b5bc9020aeed91781c426d845021a5cdb85d0c72c6fc4"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.326902 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" event={"ID":"39fa03d1-e77d-46bd-bc4c-d83960611145","Type":"ContainerStarted","Data":"4afaa88ab8f7447b0346fd87fa413c84b32f394ac54911628da6275f199a0f1a"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.343692 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" event={"ID":"a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b","Type":"ContainerStarted","Data":"ef0381696ae3092bc06172eb61790cbc40a200d30baf7cd66f235ab1be371559"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.351981 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" event={"ID":"e287d4e5-6925-42eb-a661-fded8259123f","Type":"ContainerStarted","Data":"dc11026ad0ae7dafa8051f2e725cce0ac5e44ffe50caca71e4421f21e7f79067"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.353765 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" event={"ID":"da63a4ac-64af-4d60-b968-274c9960b665","Type":"ContainerStarted","Data":"61ad7efb0e8d54e15f89d1944080cf7badaa78fac7aeb5ca4651efa7908031a2"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.357239 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" event={"ID":"bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e","Type":"ContainerStarted","Data":"bac3e86cd1c094cbf9c56d2293f85e8fd716e44edad1cf05afa31d350de0bb01"} Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.642615 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.652001 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16-cert\") pod \"infra-operator-controller-manager-57548d458d-dhjmg\" (UID: \"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.679678 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-c4zhs" Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.687408 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.845492 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:15:10 crc kubenswrapper[5045]: I1125 23:15:10.863108 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5f48852-fbb4-429b-93c8-19121a51be4a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r\" (UID: \"c5f48852-fbb4-429b-93c8-19121a51be4a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.069412 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-hk9jr" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.078438 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.352269 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.352361 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.357596 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-metrics-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.360445 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/aeeca19c-1da3-4bc7-934d-fa4c8663ca04-webhook-certs\") pod \"openstack-operator-controller-manager-746744c96-rtr9q\" (UID: \"aeeca19c-1da3-4bc7-934d-fa4c8663ca04\") " pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.362612 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-rv2z9" Nov 25 23:15:11 crc kubenswrapper[5045]: E1125 23:15:11.370113 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" podUID="4c3613c7-39a6-46b5-82da-a461d37d8965" Nov 25 23:15:11 crc kubenswrapper[5045]: E1125 23:15:11.370482 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" podUID="f1030448-0fd8-42d3-9a83-2e27d87c855e" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.386144 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.677287 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.759094 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pt8mh\" (UniqueName: \"kubernetes.io/projected/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-kube-api-access-pt8mh\") pod \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.759499 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-secret-volume\") pod \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.759534 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-config-volume\") pod \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\" (UID: \"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae\") " Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.760627 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-config-volume" (OuterVolumeSpecName: "config-volume") pod "a274ea0e-8ce3-46aa-84e7-21bf65cd00ae" (UID: "a274ea0e-8ce3-46aa-84e7-21bf65cd00ae"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.764869 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-kube-api-access-pt8mh" (OuterVolumeSpecName: "kube-api-access-pt8mh") pod "a274ea0e-8ce3-46aa-84e7-21bf65cd00ae" (UID: "a274ea0e-8ce3-46aa-84e7-21bf65cd00ae"). InnerVolumeSpecName "kube-api-access-pt8mh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.765801 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a274ea0e-8ce3-46aa-84e7-21bf65cd00ae" (UID: "a274ea0e-8ce3-46aa-84e7-21bf65cd00ae"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.862060 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pt8mh\" (UniqueName: \"kubernetes.io/projected/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-kube-api-access-pt8mh\") on node \"crc\" DevicePath \"\"" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.862085 5045 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 23:15:11 crc kubenswrapper[5045]: I1125 23:15:11.862094 5045 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 23:15:12 crc kubenswrapper[5045]: I1125 23:15:12.077457 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r"] Nov 25 23:15:12 crc kubenswrapper[5045]: W1125 23:15:12.166705 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5f48852_fbb4_429b_93c8_19121a51be4a.slice/crio-94421297d60740f64f9b3d309b17d97f09645a3b3b48a716bf2d03da344618f4 WatchSource:0}: Error finding container 94421297d60740f64f9b3d309b17d97f09645a3b3b48a716bf2d03da344618f4: Status 404 returned error can't find the container with id 94421297d60740f64f9b3d309b17d97f09645a3b3b48a716bf2d03da344618f4 Nov 25 23:15:12 crc kubenswrapper[5045]: I1125 23:15:12.233298 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q"] Nov 25 23:15:12 crc kubenswrapper[5045]: I1125 23:15:12.301690 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg"] Nov 25 23:15:12 crc kubenswrapper[5045]: I1125 23:15:12.388083 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" Nov 25 23:15:12 crc kubenswrapper[5045]: I1125 23:15:12.389074 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv" event={"ID":"a274ea0e-8ce3-46aa-84e7-21bf65cd00ae","Type":"ContainerDied","Data":"88d319af80180aa713fb819177a11ae301ff26a88fa30120eec02f774f1623ef"} Nov 25 23:15:12 crc kubenswrapper[5045]: I1125 23:15:12.389116 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88d319af80180aa713fb819177a11ae301ff26a88fa30120eec02f774f1623ef" Nov 25 23:15:12 crc kubenswrapper[5045]: I1125 23:15:12.391218 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" event={"ID":"c5f48852-fbb4-429b-93c8-19121a51be4a","Type":"ContainerStarted","Data":"94421297d60740f64f9b3d309b17d97f09645a3b3b48a716bf2d03da344618f4"} Nov 25 23:15:13 crc kubenswrapper[5045]: I1125 23:15:13.398089 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" event={"ID":"aeeca19c-1da3-4bc7-934d-fa4c8663ca04","Type":"ContainerStarted","Data":"2a0197cceeec1d2a1e69e932a47e6b08f4f7bf504fe390213d24197535d043af"} Nov 25 23:15:13 crc kubenswrapper[5045]: I1125 23:15:13.400046 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" event={"ID":"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16","Type":"ContainerStarted","Data":"be000b175ae6652f0e01d7a63890e134a233468dcfd1843a91e2612b761e40ba"} Nov 25 23:15:15 crc kubenswrapper[5045]: I1125 23:15:15.204782 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" Nov 25 23:15:15 crc kubenswrapper[5045]: E1125 23:15:15.206846 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" podUID="f1030448-0fd8-42d3-9a83-2e27d87c855e" Nov 25 23:15:15 crc kubenswrapper[5045]: I1125 23:15:15.271249 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" Nov 25 23:15:15 crc kubenswrapper[5045]: E1125 23:15:15.272917 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" podUID="4c3613c7-39a6-46b5-82da-a461d37d8965" Nov 25 23:15:35 crc kubenswrapper[5045]: E1125 23:15:35.283498 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 25 23:15:35 crc kubenswrapper[5045]: E1125 23:15:35.285077 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tnbvt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-85bpk_openstack-operators(6c15a559-c39e-47a5-83b2-74a6e830c1b2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:15:35 crc kubenswrapper[5045]: E1125 23:15:35.286239 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" podUID="6c15a559-c39e-47a5-83b2-74a6e830c1b2" Nov 25 23:15:35 crc kubenswrapper[5045]: I1125 23:15:35.586694 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" event={"ID":"aeeca19c-1da3-4bc7-934d-fa4c8663ca04","Type":"ContainerStarted","Data":"6ed7e26ae46fd993f030106c69c5c9fdd2966c9099aea2ff297092e1c3116eb5"} Nov 25 23:15:35 crc kubenswrapper[5045]: I1125 23:15:35.587389 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:35 crc kubenswrapper[5045]: I1125 23:15:35.629583 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" podStartSLOduration=40.629562541 podStartE2EDuration="40.629562541s" podCreationTimestamp="2025-11-25 23:14:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:15:35.62124977 +0000 UTC 
m=+991.978908892" watchObservedRunningTime="2025-11-25 23:15:35.629562541 +0000 UTC m=+991.987221653" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.627184 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" event={"ID":"39ce2a8b-211e-4bb4-91a3-0999e4f45162","Type":"ContainerStarted","Data":"a190768d482785a230a67cd4e83320eedad9c30735800c76d7868468d591fb79"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.639046 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" event={"ID":"4b50dd58-f8a3-4ce4-b008-dd810e1a424d","Type":"ContainerStarted","Data":"6ad518863f85b9b10c33b8f71de7dd247190a41497062b425b1e2a56092c858c"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.648368 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" event={"ID":"bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e","Type":"ContainerStarted","Data":"7cc66e3a9eb557e6a9bbe3933ccbc743f757f6c6c3e1ac30e052fe4a75ead5ca"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.649584 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.650891 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" event={"ID":"289a9811-aa55-449c-aa82-a56f4b1ef53e","Type":"ContainerStarted","Data":"9022243dcd3cd1a7160aa62b3a74b620a18c351e8b9e997d1f766c7603a8306e"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.652374 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" event={"ID":"35229e25-460b-4c57-9dae-6dceadf19b3f","Type":"ContainerStarted","Data":"abe533ee10f77995a122eb5aab1f9d65d6426e0377af0d2b8d604eaf4c6a6668"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.652706 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.660938 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" event={"ID":"74cee554-ae39-4dd6-b932-dc432e32cda0","Type":"ContainerStarted","Data":"0bd1898e4f7c59c681ac849024b71513f92c15cdb150dc1123094c06cb192e46"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.674468 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" event={"ID":"20abfff9-9e94-466e-a2bc-487a231b86a5","Type":"ContainerStarted","Data":"66ed05fbba8868ef53d84050278a93c1900a77834eaf1a5b9c4d13cc807da93e"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.677007 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.677557 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.678156 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-g4pvl" podStartSLOduration=3.704427068 podStartE2EDuration="42.678143995s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.30982175 +0000 UTC m=+952.667480862" lastFinishedPulling="2025-11-25 23:15:35.283538677 +0000 UTC m=+991.641197789" observedRunningTime="2025-11-25 23:15:36.670211975 +0000 UTC m=+993.027871097" watchObservedRunningTime="2025-11-25 23:15:36.678143995 +0000 UTC m=+993.035803107" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.678463 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" event={"ID":"2b6f436b-9a87-463e-a7ca-48ae08ba5f10","Type":"ContainerStarted","Data":"94497909d4c8bd8ea6ff6dd5970d3701274e34d4bef206c29c682d27a8b7058a"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.680052 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.682214 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.690006 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" event={"ID":"e287d4e5-6925-42eb-a661-fded8259123f","Type":"ContainerStarted","Data":"b5046aa7ef705b2d0e00eb6f554c6ee58e9e2019da48bec6806304b262ed5307"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.693068 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.695046 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.703930 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" event={"ID":"9092d9d6-3e10-4f43-84e7-121153c39104","Type":"ContainerStarted","Data":"3a91ac76849c1e60ad391959ee5aad6bffa75579b206f057abc188741083c0a0"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.704797 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.709294 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.712592 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" event={"ID":"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16","Type":"ContainerStarted","Data":"e4031a69dbacdce8d27466fbe0aae04fde10b770827680288a98243f503e6890"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.718235 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" event={"ID":"485f4bbf-205f-4ea5-8009-a0cdeb204139","Type":"ContainerStarted","Data":"f724a7cc6cd61893573e200b461a5196a90be7448e5b93cee7dde9578cdd53e9"} Nov 25 23:15:36 crc 
kubenswrapper[5045]: I1125 23:15:36.719174 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.720391 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" event={"ID":"39fa03d1-e77d-46bd-bc4c-d83960611145","Type":"ContainerStarted","Data":"8e88d9805e2555f3296fa0ffa4e317e7c9b8801f03aa8205b305560593a19fd0"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.720839 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.727229 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.729920 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.733683 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-qzhwg" podStartSLOduration=3.480281765 podStartE2EDuration="42.733666808s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.024833986 +0000 UTC m=+952.382493088" lastFinishedPulling="2025-11-25 23:15:35.278219009 +0000 UTC m=+991.635878131" observedRunningTime="2025-11-25 23:15:36.727565179 +0000 UTC m=+993.085224291" watchObservedRunningTime="2025-11-25 23:15:36.733666808 +0000 UTC m=+993.091325920" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.736241 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" event={"ID":"da63a4ac-64af-4d60-b968-274c9960b665","Type":"ContainerStarted","Data":"a4157d41f5255e6c3dcfbbc78b18df6e211f0291a45149064cb09b0781da4ccc"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.737076 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.740010 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.752214 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hmwhx" podStartSLOduration=3.555560897 podStartE2EDuration="42.752194223s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.086291063 +0000 UTC m=+952.443950175" lastFinishedPulling="2025-11-25 23:15:35.282924389 +0000 UTC m=+991.640583501" observedRunningTime="2025-11-25 23:15:36.750262539 +0000 UTC m=+993.107921671" watchObservedRunningTime="2025-11-25 23:15:36.752194223 +0000 UTC m=+993.109853335" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.761042 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" 
event={"ID":"eb320624-b1a8-45b6-891f-0b4517a5376e","Type":"ContainerStarted","Data":"f588f70be0ef8f2088db5aa11460f0bdc7ab06773a492f975fb871a2163d0fbd"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.761987 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.763462 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.788120 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" event={"ID":"38cf0eb2-2d59-418b-9e24-b04c72c58c9f","Type":"ContainerStarted","Data":"39ce1e83fc622be32135a4d4c136155a5f65ca58a9d9e7dc13a792047e6ff0e7"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.797168 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-mb2mk" podStartSLOduration=3.486507804 podStartE2EDuration="42.797150052s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:55.992678098 +0000 UTC m=+952.350337210" lastFinishedPulling="2025-11-25 23:15:35.303320326 +0000 UTC m=+991.660979458" observedRunningTime="2025-11-25 23:15:36.794117538 +0000 UTC m=+993.151776640" watchObservedRunningTime="2025-11-25 23:15:36.797150052 +0000 UTC m=+993.154809164" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.807236 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" event={"ID":"f1030448-0fd8-42d3-9a83-2e27d87c855e","Type":"ContainerStarted","Data":"6fbf10cf0c0aa818ab2c859c4006a787fb087a9ad312c55d696de9e850ac2460"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.833836 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" event={"ID":"a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b","Type":"ContainerStarted","Data":"116a33fa87b4b27b2e94472284a51f7a50aecf3d71fb501e2b1fa2ffc69b087c"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.834823 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.836436 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.853052 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" event={"ID":"4c3613c7-39a6-46b5-82da-a461d37d8965","Type":"ContainerStarted","Data":"897d47b7a54b48ac36a76ff150e07f4a716aeca3b862076f568b81661afaa582"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.863914 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-76f7fdd9bd-84dxm" podStartSLOduration=20.466750261 podStartE2EDuration="42.863893607s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.089806752 +0000 UTC m=+952.447465864" lastFinishedPulling="2025-11-25 23:15:18.486950078 +0000 UTC m=+974.844609210" 
observedRunningTime="2025-11-25 23:15:36.857559341 +0000 UTC m=+993.215218453" watchObservedRunningTime="2025-11-25 23:15:36.863893607 +0000 UTC m=+993.221552729" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.864957 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-r4wcc" podStartSLOduration=19.929421413 podStartE2EDuration="41.864949486s" podCreationTimestamp="2025-11-25 23:14:55 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.464287444 +0000 UTC m=+952.821946556" lastFinishedPulling="2025-11-25 23:15:18.399815487 +0000 UTC m=+974.757474629" observedRunningTime="2025-11-25 23:15:36.830334764 +0000 UTC m=+993.187993896" watchObservedRunningTime="2025-11-25 23:15:36.864949486 +0000 UTC m=+993.222608608" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.889364 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" event={"ID":"344fd1ea-983e-4515-aa8a-479ec0c46c81","Type":"ContainerStarted","Data":"8656bf179d30362d052c4525d41d4e43c4aab4eb59831c4a70a6607fb1999868"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.890409 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.906043 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.907838 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" event={"ID":"c5f48852-fbb4-429b-93c8-19121a51be4a","Type":"ContainerStarted","Data":"b4c8fd931e50b858fb1cd53d418f188f5e1dc3406ae684f89328f4ab0ecd7a9a"} Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.966550 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-n7s8v" podStartSLOduration=2.960112927 podStartE2EDuration="41.966529418s" podCreationTimestamp="2025-11-25 23:14:55 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.296119533 +0000 UTC m=+952.653778645" lastFinishedPulling="2025-11-25 23:15:35.302536024 +0000 UTC m=+991.660195136" observedRunningTime="2025-11-25 23:15:36.927055202 +0000 UTC m=+993.284714304" watchObservedRunningTime="2025-11-25 23:15:36.966529418 +0000 UTC m=+993.324188530" Nov 25 23:15:36 crc kubenswrapper[5045]: I1125 23:15:36.967207 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2pfmh" podStartSLOduration=3.7908005620000003 podStartE2EDuration="42.967201637s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.085519801 +0000 UTC m=+952.443178913" lastFinishedPulling="2025-11-25 23:15:35.261920876 +0000 UTC m=+991.619579988" observedRunningTime="2025-11-25 23:15:36.959806652 +0000 UTC m=+993.317465764" watchObservedRunningTime="2025-11-25 23:15:36.967201637 +0000 UTC m=+993.324860759" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:36.999099 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-phr8m" podStartSLOduration=3.798820253 
podStartE2EDuration="42.999084003s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.081418055 +0000 UTC m=+952.439077167" lastFinishedPulling="2025-11-25 23:15:35.281681805 +0000 UTC m=+991.639340917" observedRunningTime="2025-11-25 23:15:36.994071454 +0000 UTC m=+993.351730566" watchObservedRunningTime="2025-11-25 23:15:36.999084003 +0000 UTC m=+993.356743115" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.024596 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-tkwkz" podStartSLOduration=30.209038171 podStartE2EDuration="43.024580941s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.099579879 +0000 UTC m=+952.457238991" lastFinishedPulling="2025-11-25 23:15:08.915122659 +0000 UTC m=+965.272781761" observedRunningTime="2025-11-25 23:15:37.021865466 +0000 UTC m=+993.379524608" watchObservedRunningTime="2025-11-25 23:15:37.024580941 +0000 UTC m=+993.382240053" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.096647 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-npq84" podStartSLOduration=3.938357039 podStartE2EDuration="43.096626493s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.100294149 +0000 UTC m=+952.457953261" lastFinishedPulling="2025-11-25 23:15:35.258563583 +0000 UTC m=+991.616222715" observedRunningTime="2025-11-25 23:15:37.092315823 +0000 UTC m=+993.449974935" watchObservedRunningTime="2025-11-25 23:15:37.096626493 +0000 UTC m=+993.454285605" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.096774 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-8mpkk" podStartSLOduration=3.602698611 podStartE2EDuration="43.096769107s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:55.779494293 +0000 UTC m=+952.137153395" lastFinishedPulling="2025-11-25 23:15:35.273564779 +0000 UTC m=+991.631223891" observedRunningTime="2025-11-25 23:15:37.068027359 +0000 UTC m=+993.425686471" watchObservedRunningTime="2025-11-25 23:15:37.096769107 +0000 UTC m=+993.454428209" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.119281 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-7rzvl" podStartSLOduration=30.581184233 podStartE2EDuration="43.119265272s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.2988523 +0000 UTC m=+952.656511422" lastFinishedPulling="2025-11-25 23:15:08.836933349 +0000 UTC m=+965.194592461" observedRunningTime="2025-11-25 23:15:37.117363109 +0000 UTC m=+993.475022221" watchObservedRunningTime="2025-11-25 23:15:37.119265272 +0000 UTC m=+993.476924384" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.939292 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" event={"ID":"74cee554-ae39-4dd6-b932-dc432e32cda0","Type":"ContainerStarted","Data":"cbe2bc742dc88c51b42898cf91b4a11568770f20c12a12182fbf799a462d6b31"} Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.939498 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.941522 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" event={"ID":"c5f48852-fbb4-429b-93c8-19121a51be4a","Type":"ContainerStarted","Data":"7b66b98cdfd3d134f2dc55394cf9bde96d9a508375c7ec177f3f0b0eced0b622"} Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.941683 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.943318 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" event={"ID":"39ce2a8b-211e-4bb4-91a3-0999e4f45162","Type":"ContainerStarted","Data":"503899f2f05ed9a94a14ec5c77f21087c7a152ed2e880dd8c5c12a779d7b266a"} Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.943394 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.945101 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" event={"ID":"4b50dd58-f8a3-4ce4-b008-dd810e1a424d","Type":"ContainerStarted","Data":"f39c4d8ae25eeff00df3c2d6ff8de8646f4c58ffeadbfa0755b21c9364412566"} Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.945149 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.946671 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" event={"ID":"cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16","Type":"ContainerStarted","Data":"f93f4ab41f7a152c0f6889cdefad92f1a091cc8d614facbbdf05681c724d19a1"} Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.946873 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.948665 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" event={"ID":"38cf0eb2-2d59-418b-9e24-b04c72c58c9f","Type":"ContainerStarted","Data":"97f1927182c02c334e337d84f416ebefba63d68f6d3cccbc88c8b4a15bfad6aa"} Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.948810 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.950668 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" event={"ID":"289a9811-aa55-449c-aa82-a56f4b1ef53e","Type":"ContainerStarted","Data":"a5677b57e1329d196d70bd00c869baf5163175644cf05fcd29f16df7a799db0d"} Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.950809 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.953826 5045 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" event={"ID":"35229e25-460b-4c57-9dae-6dceadf19b3f","Type":"ContainerStarted","Data":"c5be3bdaa3885885850a0da0e8f4f79571aacf562b91665448091108ad0c858e"} Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.954358 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.964484 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" podStartSLOduration=4.189896057 podStartE2EDuration="42.964466637s" podCreationTimestamp="2025-11-25 23:14:55 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.487644214 +0000 UTC m=+952.845303346" lastFinishedPulling="2025-11-25 23:15:35.262214794 +0000 UTC m=+991.619873926" observedRunningTime="2025-11-25 23:15:37.959814287 +0000 UTC m=+994.317473409" watchObservedRunningTime="2025-11-25 23:15:37.964466637 +0000 UTC m=+994.322125739" Nov 25 23:15:37 crc kubenswrapper[5045]: I1125 23:15:37.977499 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" podStartSLOduration=21.922489409 podStartE2EDuration="43.977487878s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.344264203 +0000 UTC m=+952.701923305" lastFinishedPulling="2025-11-25 23:15:18.399262632 +0000 UTC m=+974.756921774" observedRunningTime="2025-11-25 23:15:37.97502743 +0000 UTC m=+994.332686542" watchObservedRunningTime="2025-11-25 23:15:37.977487878 +0000 UTC m=+994.335146990" Nov 25 23:15:38 crc kubenswrapper[5045]: I1125 23:15:38.005566 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" podStartSLOduration=20.908313488 podStartE2EDuration="44.005547438s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:15:12.185440313 +0000 UTC m=+968.543099425" lastFinishedPulling="2025-11-25 23:15:35.282674262 +0000 UTC m=+991.640333375" observedRunningTime="2025-11-25 23:15:38.000445166 +0000 UTC m=+994.358104288" watchObservedRunningTime="2025-11-25 23:15:38.005547438 +0000 UTC m=+994.363206550" Nov 25 23:15:38 crc kubenswrapper[5045]: I1125 23:15:38.016887 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" podStartSLOduration=21.197238053 podStartE2EDuration="44.016872823s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:15:12.452402777 +0000 UTC m=+968.810061889" lastFinishedPulling="2025-11-25 23:15:35.272037547 +0000 UTC m=+991.629696659" observedRunningTime="2025-11-25 23:15:38.014010813 +0000 UTC m=+994.371669915" watchObservedRunningTime="2025-11-25 23:15:38.016872823 +0000 UTC m=+994.374531935" Nov 25 23:15:38 crc kubenswrapper[5045]: I1125 23:15:38.039517 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" podStartSLOduration=5.102063949 podStartE2EDuration="44.039495791s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.345001214 +0000 UTC m=+952.702660326" lastFinishedPulling="2025-11-25 23:15:35.282433056 +0000 
UTC m=+991.640092168" observedRunningTime="2025-11-25 23:15:38.037620269 +0000 UTC m=+994.395279381" watchObservedRunningTime="2025-11-25 23:15:38.039495791 +0000 UTC m=+994.397154903" Nov 25 23:15:38 crc kubenswrapper[5045]: I1125 23:15:38.065646 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" podStartSLOduration=8.992897575 podStartE2EDuration="44.065628257s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.318852935 +0000 UTC m=+952.676512047" lastFinishedPulling="2025-11-25 23:15:31.391583617 +0000 UTC m=+987.749242729" observedRunningTime="2025-11-25 23:15:38.064382513 +0000 UTC m=+994.422041625" watchObservedRunningTime="2025-11-25 23:15:38.065628257 +0000 UTC m=+994.423287369" Nov 25 23:15:38 crc kubenswrapper[5045]: I1125 23:15:38.122863 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" podStartSLOduration=4.171652263 podStartE2EDuration="43.122832957s" podCreationTimestamp="2025-11-25 23:14:55 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.344456059 +0000 UTC m=+952.702115161" lastFinishedPulling="2025-11-25 23:15:35.295636743 +0000 UTC m=+991.653295855" observedRunningTime="2025-11-25 23:15:38.093979835 +0000 UTC m=+994.451638947" watchObservedRunningTime="2025-11-25 23:15:38.122832957 +0000 UTC m=+994.480492069" Nov 25 23:15:38 crc kubenswrapper[5045]: I1125 23:15:38.128352 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" podStartSLOduration=22.216571048 podStartE2EDuration="44.12832774s" podCreationTimestamp="2025-11-25 23:14:54 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.33814177 +0000 UTC m=+952.695800872" lastFinishedPulling="2025-11-25 23:15:18.249898442 +0000 UTC m=+974.607557564" observedRunningTime="2025-11-25 23:15:38.121173941 +0000 UTC m=+994.478833053" watchObservedRunningTime="2025-11-25 23:15:38.12832774 +0000 UTC m=+994.485986852" Nov 25 23:15:40 crc kubenswrapper[5045]: I1125 23:15:40.693459 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-dhjmg" Nov 25 23:15:41 crc kubenswrapper[5045]: I1125 23:15:41.088345 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r" Nov 25 23:15:41 crc kubenswrapper[5045]: I1125 23:15:41.396176 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-746744c96-rtr9q" Nov 25 23:15:45 crc kubenswrapper[5045]: I1125 23:15:45.345464 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q7rbk" Nov 25 23:15:45 crc kubenswrapper[5045]: I1125 23:15:45.408254 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-m98w9" Nov 25 23:15:45 crc kubenswrapper[5045]: I1125 23:15:45.424020 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jzkxl" Nov 25 23:15:45 crc kubenswrapper[5045]: I1125 23:15:45.430512 5045 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-tkgqg" Nov 25 23:15:45 crc kubenswrapper[5045]: I1125 23:15:45.543011 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rk9dz" Nov 25 23:15:45 crc kubenswrapper[5045]: I1125 23:15:45.738184 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vsfg5" Nov 25 23:15:47 crc kubenswrapper[5045]: E1125 23:15:47.400151 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" podUID="6c15a559-c39e-47a5-83b2-74a6e830c1b2" Nov 25 23:16:02 crc kubenswrapper[5045]: I1125 23:16:02.400207 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:16:04 crc kubenswrapper[5045]: I1125 23:16:04.178413 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" event={"ID":"6c15a559-c39e-47a5-83b2-74a6e830c1b2","Type":"ContainerStarted","Data":"84c81a3a76ea90e0ffcae761bb8d1be8c281030ec6de31dc9a8e851d5deb08d1"} Nov 25 23:16:04 crc kubenswrapper[5045]: I1125 23:16:04.199301 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-85bpk" podStartSLOduration=2.029568401 podStartE2EDuration="1m9.199277978s" podCreationTimestamp="2025-11-25 23:14:55 +0000 UTC" firstStartedPulling="2025-11-25 23:14:56.464761687 +0000 UTC m=+952.822420799" lastFinishedPulling="2025-11-25 23:16:03.634471234 +0000 UTC m=+1019.992130376" observedRunningTime="2025-11-25 23:16:04.196935043 +0000 UTC m=+1020.554594205" watchObservedRunningTime="2025-11-25 23:16:04.199277978 +0000 UTC m=+1020.556937100" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.671198 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9w8jb"] Nov 25 23:16:19 crc kubenswrapper[5045]: E1125 23:16:19.672819 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a274ea0e-8ce3-46aa-84e7-21bf65cd00ae" containerName="collect-profiles" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.673106 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a274ea0e-8ce3-46aa-84e7-21bf65cd00ae" containerName="collect-profiles" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.673330 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a274ea0e-8ce3-46aa-84e7-21bf65cd00ae" containerName="collect-profiles" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.674102 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.679552 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.679906 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.679912 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-lx4px" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.679988 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.692163 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9w8jb"] Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.736980 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-q5hp8"] Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.738089 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.739566 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.747894 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.747930 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-config\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.747955 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96861537-7a24-4321-a52a-bfbdadaa396a-config\") pod \"dnsmasq-dns-675f4bcbfc-9w8jb\" (UID: \"96861537-7a24-4321-a52a-bfbdadaa396a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.748008 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhp87\" (UniqueName: \"kubernetes.io/projected/a414eff5-9a43-4ada-a8f2-a5a4451f3097-kube-api-access-rhp87\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.748043 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlfxg\" (UniqueName: \"kubernetes.io/projected/96861537-7a24-4321-a52a-bfbdadaa396a-kube-api-access-xlfxg\") pod \"dnsmasq-dns-675f4bcbfc-9w8jb\" (UID: \"96861537-7a24-4321-a52a-bfbdadaa396a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.785347 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-78dd6ddcc-q5hp8"] Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.848994 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.849036 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-config\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.849064 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96861537-7a24-4321-a52a-bfbdadaa396a-config\") pod \"dnsmasq-dns-675f4bcbfc-9w8jb\" (UID: \"96861537-7a24-4321-a52a-bfbdadaa396a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.849092 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhp87\" (UniqueName: \"kubernetes.io/projected/a414eff5-9a43-4ada-a8f2-a5a4451f3097-kube-api-access-rhp87\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.849125 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlfxg\" (UniqueName: \"kubernetes.io/projected/96861537-7a24-4321-a52a-bfbdadaa396a-kube-api-access-xlfxg\") pod \"dnsmasq-dns-675f4bcbfc-9w8jb\" (UID: \"96861537-7a24-4321-a52a-bfbdadaa396a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.849981 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.850027 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-config\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.850217 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96861537-7a24-4321-a52a-bfbdadaa396a-config\") pod \"dnsmasq-dns-675f4bcbfc-9w8jb\" (UID: \"96861537-7a24-4321-a52a-bfbdadaa396a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.866420 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlfxg\" (UniqueName: \"kubernetes.io/projected/96861537-7a24-4321-a52a-bfbdadaa396a-kube-api-access-xlfxg\") pod \"dnsmasq-dns-675f4bcbfc-9w8jb\" (UID: \"96861537-7a24-4321-a52a-bfbdadaa396a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.878995 5045 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rhp87\" (UniqueName: \"kubernetes.io/projected/a414eff5-9a43-4ada-a8f2-a5a4451f3097-kube-api-access-rhp87\") pod \"dnsmasq-dns-78dd6ddcc-q5hp8\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:19 crc kubenswrapper[5045]: I1125 23:16:19.991620 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:20 crc kubenswrapper[5045]: I1125 23:16:20.050178 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:20 crc kubenswrapper[5045]: I1125 23:16:20.459645 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9w8jb"] Nov 25 23:16:20 crc kubenswrapper[5045]: I1125 23:16:20.516061 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-q5hp8"] Nov 25 23:16:20 crc kubenswrapper[5045]: W1125 23:16:20.518649 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda414eff5_9a43_4ada_a8f2_a5a4451f3097.slice/crio-2203d3c2ebfd9d59511e952a039f6ba056d792165319f4f7770cc3131fbf1198 WatchSource:0}: Error finding container 2203d3c2ebfd9d59511e952a039f6ba056d792165319f4f7770cc3131fbf1198: Status 404 returned error can't find the container with id 2203d3c2ebfd9d59511e952a039f6ba056d792165319f4f7770cc3131fbf1198 Nov 25 23:16:21 crc kubenswrapper[5045]: I1125 23:16:21.319131 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" event={"ID":"a414eff5-9a43-4ada-a8f2-a5a4451f3097","Type":"ContainerStarted","Data":"2203d3c2ebfd9d59511e952a039f6ba056d792165319f4f7770cc3131fbf1198"} Nov 25 23:16:21 crc kubenswrapper[5045]: I1125 23:16:21.320862 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" event={"ID":"96861537-7a24-4321-a52a-bfbdadaa396a","Type":"ContainerStarted","Data":"f96a7b81526c440b98fb4ee9fcaf55135dfef631fec1fbc21f2a46d1e0f36224"} Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.576656 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9w8jb"] Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.597686 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-4xqdk"] Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.600010 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.608028 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-4xqdk"] Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.692398 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-config\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.692452 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.692543 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vt7bh\" (UniqueName: \"kubernetes.io/projected/e35b7a88-26cb-4997-a934-934d15977a6b-kube-api-access-vt7bh\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.804534 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vt7bh\" (UniqueName: \"kubernetes.io/projected/e35b7a88-26cb-4997-a934-934d15977a6b-kube-api-access-vt7bh\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.804873 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-config\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.805092 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.805786 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-config\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.805958 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.827596 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vt7bh\" (UniqueName: 
\"kubernetes.io/projected/e35b7a88-26cb-4997-a934-934d15977a6b-kube-api-access-vt7bh\") pod \"dnsmasq-dns-666b6646f7-4xqdk\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.894512 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-q5hp8"] Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.921584 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qqh5j"] Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.922161 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.923032 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:22 crc kubenswrapper[5045]: I1125 23:16:22.950999 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qqh5j"] Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.010474 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.010530 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-config\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.010557 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvbjs\" (UniqueName: \"kubernetes.io/projected/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-kube-api-access-qvbjs\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.125435 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.125493 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-config\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.125517 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvbjs\" (UniqueName: \"kubernetes.io/projected/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-kube-api-access-qvbjs\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.127083 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.127759 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-config\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.142164 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvbjs\" (UniqueName: \"kubernetes.io/projected/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-kube-api-access-qvbjs\") pod \"dnsmasq-dns-57d769cc4f-qqh5j\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.246586 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.462364 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-4xqdk"] Nov 25 23:16:23 crc kubenswrapper[5045]: W1125 23:16:23.493791 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode35b7a88_26cb_4997_a934_934d15977a6b.slice/crio-f10e0f173a7089fc9e6da76b81ff3667745ac921c30d374a01de41f20d5153b8 WatchSource:0}: Error finding container f10e0f173a7089fc9e6da76b81ff3667745ac921c30d374a01de41f20d5153b8: Status 404 returned error can't find the container with id f10e0f173a7089fc9e6da76b81ff3667745ac921c30d374a01de41f20d5153b8 Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.735224 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.736486 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.740049 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.740107 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-hdhzj" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.740186 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.740113 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.740360 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.740439 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.740630 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.745778 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.832116 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qqh5j"] Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838611 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7fac1a35-2303-42d7-b27b-410ecff1b89a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838662 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838687 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22bsm\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-kube-api-access-22bsm\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838721 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7fac1a35-2303-42d7-b27b-410ecff1b89a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838747 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0" Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 
23:16:23.838763 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838776 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838809 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838823 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-config-data\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838848 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.838888 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: W1125 23:16:23.841088 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80f7416b_a1d6_41a4_b92b_8d4ef6cc3873.slice/crio-141c2ec38328dd741a94220816433d22d090fa7ca15b0ab33b9d845310423ef7 WatchSource:0}: Error finding container 141c2ec38328dd741a94220816433d22d090fa7ca15b0ab33b9d845310423ef7: Status 404 returned error can't find the container with id 141c2ec38328dd741a94220816433d22d090fa7ca15b0ab33b9d845310423ef7
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940129 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7fac1a35-2303-42d7-b27b-410ecff1b89a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940187 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940213 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22bsm\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-kube-api-access-22bsm\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940230 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7fac1a35-2303-42d7-b27b-410ecff1b89a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940254 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940270 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940286 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940320 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940335 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-config-data\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940361 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.940402 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.941174 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.941575 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.942292 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.942357 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.942482 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.942644 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-config-data\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.947378 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7fac1a35-2303-42d7-b27b-410ecff1b89a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.949581 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.952031 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7fac1a35-2303-42d7-b27b-410ecff1b89a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.952521 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.962004 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22bsm\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-kube-api-access-22bsm\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:23 crc kubenswrapper[5045]: I1125 23:16:23.974889 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " pod="openstack/rabbitmq-server-0"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.064047 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.073527 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.074699 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.074811 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.077401 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.077688 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.077862 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.077968 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-g7cld"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.078083 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.078865 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.079022 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144145 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144209 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144403 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID:
\"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144451 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144488 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144558 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144582 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144611 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ksmb\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-kube-api-access-8ksmb\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144646 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144775 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.144809 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.245812 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.245863 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.245888 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.245909 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.245934 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.245947 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.245963 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ksmb\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-kube-api-access-8ksmb\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.245980 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.246024 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.246044 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.246071 
5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.246982 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.247199 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.247620 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.248061 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.248146 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.248554 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.264133 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.267075 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.268400 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.268567 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ksmb\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-kube-api-access-8ksmb\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.269431 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.281585 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.384436 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" event={"ID":"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873","Type":"ContainerStarted","Data":"141c2ec38328dd741a94220816433d22d090fa7ca15b0ab33b9d845310423ef7"} Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.386459 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" event={"ID":"e35b7a88-26cb-4997-a934-934d15977a6b","Type":"ContainerStarted","Data":"f10e0f173a7089fc9e6da76b81ff3667745ac921c30d374a01de41f20d5153b8"} Nov 25 23:16:24 crc kubenswrapper[5045]: I1125 23:16:24.435388 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.592811 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.596871 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.599238 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-q7scz" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.600028 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.600629 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.601111 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.605196 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.610683 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.679032 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.679068 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfk8k\" (UniqueName: \"kubernetes.io/projected/981d9260-fe05-4c33-9c46-c65a7a31c7b1-kube-api-access-lfk8k\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.679104 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/981d9260-fe05-4c33-9c46-c65a7a31c7b1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.679131 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-config-data-default\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.679171 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/981d9260-fe05-4c33-9c46-c65a7a31c7b1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.679271 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.679317 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/981d9260-fe05-4c33-9c46-c65a7a31c7b1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.679383 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-kolla-config\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.780571 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/981d9260-fe05-4c33-9c46-c65a7a31c7b1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.780649 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-config-data-default\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.780734 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/981d9260-fe05-4c33-9c46-c65a7a31c7b1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.780776 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.780804 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/981d9260-fe05-4c33-9c46-c65a7a31c7b1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.780838 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-kolla-config\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.780868 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.780898 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfk8k\" (UniqueName: \"kubernetes.io/projected/981d9260-fe05-4c33-9c46-c65a7a31c7b1-kube-api-access-lfk8k\") pod \"openstack-galera-0\" (UID: 
\"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.781391 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.781475 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/981d9260-fe05-4c33-9c46-c65a7a31c7b1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.781695 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-kolla-config\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.781930 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-config-data-default\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.782663 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/981d9260-fe05-4c33-9c46-c65a7a31c7b1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.786066 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/981d9260-fe05-4c33-9c46-c65a7a31c7b1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.790384 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/981d9260-fe05-4c33-9c46-c65a7a31c7b1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.800366 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfk8k\" (UniqueName: \"kubernetes.io/projected/981d9260-fe05-4c33-9c46-c65a7a31c7b1-kube-api-access-lfk8k\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.801877 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"981d9260-fe05-4c33-9c46-c65a7a31c7b1\") " pod="openstack/openstack-galera-0" Nov 25 23:16:25 crc kubenswrapper[5045]: I1125 23:16:25.923543 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.187497 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.189949 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.192658 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.193075 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.193362 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-vfzz8" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.193534 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.203602 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.236935 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.237988 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.241232 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.241706 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.246127 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-xlwb7" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.259096 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309224 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309284 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/abaa26fa-f1a8-4249-8179-ad1b64334be5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309313 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/95d30530-9d52-442a-94e0-3e85871f0c4f-config-data\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309345 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-c22s8\" (UniqueName: \"kubernetes.io/projected/95d30530-9d52-442a-94e0-3e85871f0c4f-kube-api-access-c22s8\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309361 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309376 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309399 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abaa26fa-f1a8-4249-8179-ad1b64334be5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309444 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309465 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/95d30530-9d52-442a-94e0-3e85871f0c4f-memcached-tls-certs\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309479 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95d30530-9d52-442a-94e0-3e85871f0c4f-combined-ca-bundle\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309511 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxfqz\" (UniqueName: \"kubernetes.io/projected/abaa26fa-f1a8-4249-8179-ad1b64334be5-kube-api-access-bxfqz\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.309767 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/abaa26fa-f1a8-4249-8179-ad1b64334be5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.310767 5045 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/95d30530-9d52-442a-94e0-3e85871f0c4f-kolla-config\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.411772 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.411848 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/abaa26fa-f1a8-4249-8179-ad1b64334be5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.411884 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/95d30530-9d52-442a-94e0-3e85871f0c4f-config-data\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.411933 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c22s8\" (UniqueName: \"kubernetes.io/projected/95d30530-9d52-442a-94e0-3e85871f0c4f-kube-api-access-c22s8\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.411953 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.411969 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412008 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abaa26fa-f1a8-4249-8179-ad1b64334be5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412067 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412095 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/95d30530-9d52-442a-94e0-3e85871f0c4f-memcached-tls-certs\") pod 
\"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412110 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95d30530-9d52-442a-94e0-3e85871f0c4f-combined-ca-bundle\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412126 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxfqz\" (UniqueName: \"kubernetes.io/projected/abaa26fa-f1a8-4249-8179-ad1b64334be5-kube-api-access-bxfqz\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412142 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/abaa26fa-f1a8-4249-8179-ad1b64334be5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412162 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/95d30530-9d52-442a-94e0-3e85871f0c4f-kolla-config\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412501 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412806 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/95d30530-9d52-442a-94e0-3e85871f0c4f-kolla-config\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.412817 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.413668 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.413991 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/abaa26fa-f1a8-4249-8179-ad1b64334be5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.415217 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/95d30530-9d52-442a-94e0-3e85871f0c4f-config-data\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.418076 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abaa26fa-f1a8-4249-8179-ad1b64334be5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.421944 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/abaa26fa-f1a8-4249-8179-ad1b64334be5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.422120 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abaa26fa-f1a8-4249-8179-ad1b64334be5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.424688 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95d30530-9d52-442a-94e0-3e85871f0c4f-combined-ca-bundle\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.425402 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/95d30530-9d52-442a-94e0-3e85871f0c4f-memcached-tls-certs\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.436327 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c22s8\" (UniqueName: \"kubernetes.io/projected/95d30530-9d52-442a-94e0-3e85871f0c4f-kube-api-access-c22s8\") pod \"memcached-0\" (UID: \"95d30530-9d52-442a-94e0-3e85871f0c4f\") " pod="openstack/memcached-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.439090 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxfqz\" (UniqueName: \"kubernetes.io/projected/abaa26fa-f1a8-4249-8179-ad1b64334be5-kube-api-access-bxfqz\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.448341 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"abaa26fa-f1a8-4249-8179-ad1b64334be5\") " pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.513359 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:27 crc kubenswrapper[5045]: I1125 23:16:27.562816 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 25 23:16:28 crc kubenswrapper[5045]: I1125 23:16:28.674500 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:16:28 crc kubenswrapper[5045]: I1125 23:16:28.676460 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 23:16:28 crc kubenswrapper[5045]: I1125 23:16:28.679346 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-5lszt" Nov 25 23:16:28 crc kubenswrapper[5045]: I1125 23:16:28.697594 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:16:28 crc kubenswrapper[5045]: I1125 23:16:28.729098 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfpvf\" (UniqueName: \"kubernetes.io/projected/cf25e150-e63e-4987-89ce-6e1419e00e88-kube-api-access-gfpvf\") pod \"kube-state-metrics-0\" (UID: \"cf25e150-e63e-4987-89ce-6e1419e00e88\") " pod="openstack/kube-state-metrics-0" Nov 25 23:16:28 crc kubenswrapper[5045]: I1125 23:16:28.830484 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfpvf\" (UniqueName: \"kubernetes.io/projected/cf25e150-e63e-4987-89ce-6e1419e00e88-kube-api-access-gfpvf\") pod \"kube-state-metrics-0\" (UID: \"cf25e150-e63e-4987-89ce-6e1419e00e88\") " pod="openstack/kube-state-metrics-0" Nov 25 23:16:28 crc kubenswrapper[5045]: I1125 23:16:28.851305 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfpvf\" (UniqueName: \"kubernetes.io/projected/cf25e150-e63e-4987-89ce-6e1419e00e88-kube-api-access-gfpvf\") pod \"kube-state-metrics-0\" (UID: \"cf25e150-e63e-4987-89ce-6e1419e00e88\") " pod="openstack/kube-state-metrics-0" Nov 25 23:16:28 crc kubenswrapper[5045]: I1125 23:16:28.996114 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.684378 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-kfwsl"] Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.685989 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.688263 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-mwf8r" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.690319 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.691050 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.692702 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-z2vw6"] Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.694297 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.700350 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kfwsl"] Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704273 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-log\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704321 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw6gh\" (UniqueName: \"kubernetes.io/projected/efb42386-0a5d-423f-b31e-13e9433271ba-kube-api-access-rw6gh\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704347 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-etc-ovs\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704370 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1765d304-e95f-43d5-9655-84e468fe332e-scripts\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704390 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-log-ovn\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704408 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efb42386-0a5d-423f-b31e-13e9433271ba-scripts\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704544 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-lib\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704595 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/efb42386-0a5d-423f-b31e-13e9433271ba-ovn-controller-tls-certs\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704660 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-run\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704727 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-run\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704752 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efb42386-0a5d-423f-b31e-13e9433271ba-combined-ca-bundle\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704872 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn9kl\" (UniqueName: \"kubernetes.io/projected/1765d304-e95f-43d5-9655-84e468fe332e-kube-api-access-qn9kl\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.704906 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-run-ovn\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.728333 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-z2vw6"] Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.805970 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-log\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806016 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw6gh\" (UniqueName: \"kubernetes.io/projected/efb42386-0a5d-423f-b31e-13e9433271ba-kube-api-access-rw6gh\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806055 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-etc-ovs\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806072 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1765d304-e95f-43d5-9655-84e468fe332e-scripts\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806094 5045 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-log-ovn\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806129 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efb42386-0a5d-423f-b31e-13e9433271ba-scripts\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806173 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-lib\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806223 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/efb42386-0a5d-423f-b31e-13e9433271ba-ovn-controller-tls-certs\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806248 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-run\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806292 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-run\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806310 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efb42386-0a5d-423f-b31e-13e9433271ba-combined-ca-bundle\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806391 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn9kl\" (UniqueName: \"kubernetes.io/projected/1765d304-e95f-43d5-9655-84e468fe332e-kube-api-access-qn9kl\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806415 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-run-ovn\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806544 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-etc-ovs\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " 
pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806672 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-log\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806753 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-run\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.806870 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-run\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.807022 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-log-ovn\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.807859 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/efb42386-0a5d-423f-b31e-13e9433271ba-var-run-ovn\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.809802 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1765d304-e95f-43d5-9655-84e468fe332e-scripts\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.812252 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/efb42386-0a5d-423f-b31e-13e9433271ba-ovn-controller-tls-certs\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.812855 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efb42386-0a5d-423f-b31e-13e9433271ba-combined-ca-bundle\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.820905 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1765d304-e95f-43d5-9655-84e468fe332e-var-lib\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.821185 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efb42386-0a5d-423f-b31e-13e9433271ba-scripts\") pod 
\"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.821205 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw6gh\" (UniqueName: \"kubernetes.io/projected/efb42386-0a5d-423f-b31e-13e9433271ba-kube-api-access-rw6gh\") pod \"ovn-controller-kfwsl\" (UID: \"efb42386-0a5d-423f-b31e-13e9433271ba\") " pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:32 crc kubenswrapper[5045]: I1125 23:16:32.828910 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn9kl\" (UniqueName: \"kubernetes.io/projected/1765d304-e95f-43d5-9655-84e468fe332e-kube-api-access-qn9kl\") pod \"ovn-controller-ovs-z2vw6\" (UID: \"1765d304-e95f-43d5-9655-84e468fe332e\") " pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:33 crc kubenswrapper[5045]: I1125 23:16:33.018543 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:33 crc kubenswrapper[5045]: I1125 23:16:33.038824 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.541234 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.550397 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.555809 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.591455 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.592643 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.592802 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-mxqr2" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.592924 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.594795 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.641611 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dca2a357-2bb3-4400-a74b-5ec428e7a710-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.641680 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chtwl\" (UniqueName: \"kubernetes.io/projected/dca2a357-2bb3-4400-a74b-5ec428e7a710-kube-api-access-chtwl\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.641700 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.641733 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dca2a357-2bb3-4400-a74b-5ec428e7a710-config\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.641755 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dca2a357-2bb3-4400-a74b-5ec428e7a710-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.641829 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.641858 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.641872 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.742825 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chtwl\" (UniqueName: \"kubernetes.io/projected/dca2a357-2bb3-4400-a74b-5ec428e7a710-kube-api-access-chtwl\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.742883 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.742915 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dca2a357-2bb3-4400-a74b-5ec428e7a710-config\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.742948 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dca2a357-2bb3-4400-a74b-5ec428e7a710-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " 
pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.743011 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.743047 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.743066 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.743111 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dca2a357-2bb3-4400-a74b-5ec428e7a710-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.743735 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dca2a357-2bb3-4400-a74b-5ec428e7a710-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.744489 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dca2a357-2bb3-4400-a74b-5ec428e7a710-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.744737 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.745157 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dca2a357-2bb3-4400-a74b-5ec428e7a710-config\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.749458 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.755811 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-ovsdbserver-sb-tls-certs\") pod 
\"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.763021 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chtwl\" (UniqueName: \"kubernetes.io/projected/dca2a357-2bb3-4400-a74b-5ec428e7a710-kube-api-access-chtwl\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.769078 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dca2a357-2bb3-4400-a74b-5ec428e7a710-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.772988 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"dca2a357-2bb3-4400-a74b-5ec428e7a710\") " pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:34 crc kubenswrapper[5045]: I1125 23:16:34.909127 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.800297 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.802335 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.803938 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.804441 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-gljmp" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.804687 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.804691 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.810598 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.860932 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.861000 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.861026 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod 
\"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.861054 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.861079 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.861118 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.861138 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-config\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.861168 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6xrv\" (UniqueName: \"kubernetes.io/projected/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-kube-api-access-z6xrv\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.963190 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.963266 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.963291 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.963321 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 
23:16:35.963346 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.963390 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.963408 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-config\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.963440 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6xrv\" (UniqueName: \"kubernetes.io/projected/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-kube-api-access-z6xrv\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.964323 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.964835 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.967046 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-config\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.967316 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.968943 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.971582 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" 
Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.975159 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.979133 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6xrv\" (UniqueName: \"kubernetes.io/projected/3f0b6c93-84c1-4b4a-8d1d-844d035fe867-kube-api-access-z6xrv\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:35 crc kubenswrapper[5045]: I1125 23:16:35.995129 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"3f0b6c93-84c1-4b4a-8d1d-844d035fe867\") " pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:36 crc kubenswrapper[5045]: I1125 23:16:36.164179 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:37 crc kubenswrapper[5045]: I1125 23:16:37.354370 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 23:16:37 crc kubenswrapper[5045]: W1125 23:16:37.728299 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb33fb38e_e7c7_4bb6_92ee_f98e45e71a95.slice/crio-b676b9f3e241789bdc5fe601c3a249dc871c4b34599754deaa2c8ead95e17f07 WatchSource:0}: Error finding container b676b9f3e241789bdc5fe601c3a249dc871c4b34599754deaa2c8ead95e17f07: Status 404 returned error can't find the container with id b676b9f3e241789bdc5fe601c3a249dc871c4b34599754deaa2c8ead95e17f07 Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.733897 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.734625 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.734805 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xlfxg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-9w8jb_openstack(96861537-7a24-4321-a52a-bfbdadaa396a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.734216 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vt7bh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-4xqdk_openstack(e35b7a88-26cb-4997-a934-934d15977a6b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.736263 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" podUID="e35b7a88-26cb-4997-a934-934d15977a6b" Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.736661 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" podUID="96861537-7a24-4321-a52a-bfbdadaa396a" Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.757162 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.757368 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rhp87,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-q5hp8_openstack(a414eff5-9a43-4ada-a8f2-a5a4451f3097): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:16:37 crc kubenswrapper[5045]: E1125 23:16:37.758569 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" podUID="a414eff5-9a43-4ada-a8f2-a5a4451f3097" Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.227690 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.338698 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 23:16:38 crc kubenswrapper[5045]: W1125 23:16:38.347801 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod981d9260_fe05_4c33_9c46_c65a7a31c7b1.slice/crio-f1a03882c27790da090091bc58b06bd6ad18fea24a3760bad6170e22be05d70b WatchSource:0}: Error finding container f1a03882c27790da090091bc58b06bd6ad18fea24a3760bad6170e22be05d70b: Status 404 returned error can't find the container with id f1a03882c27790da090091bc58b06bd6ad18fea24a3760bad6170e22be05d70b Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.348350 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.354380 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 23:16:38 crc kubenswrapper[5045]: W1125 23:16:38.358881 5045 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podabaa26fa_f1a8_4249_8179_ad1b64334be5.slice/crio-d80d5b32fe143dea2bc0a906897039ef49967c47b4e2e5aecc103dcdd8c8fe02 WatchSource:0}: Error finding container d80d5b32fe143dea2bc0a906897039ef49967c47b4e2e5aecc103dcdd8c8fe02: Status 404 returned error can't find the container with id d80d5b32fe143dea2bc0a906897039ef49967c47b4e2e5aecc103dcdd8c8fe02 Nov 25 23:16:38 crc kubenswrapper[5045]: W1125 23:16:38.361499 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7fac1a35_2303_42d7_b27b_410ecff1b89a.slice/crio-0635c2263e6920e46260d89e8f63c1856a28d36e451f5b0f65a6bac32eeebb4c WatchSource:0}: Error finding container 0635c2263e6920e46260d89e8f63c1856a28d36e451f5b0f65a6bac32eeebb4c: Status 404 returned error can't find the container with id 0635c2263e6920e46260d89e8f63c1856a28d36e451f5b0f65a6bac32eeebb4c Nov 25 23:16:38 crc kubenswrapper[5045]: W1125 23:16:38.482070 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf25e150_e63e_4987_89ce_6e1419e00e88.slice/crio-5840ac0803474125244042f84d64dad2d1362f190734b3cd2b2d4340d7128b35 WatchSource:0}: Error finding container 5840ac0803474125244042f84d64dad2d1362f190734b3cd2b2d4340d7128b35: Status 404 returned error can't find the container with id 5840ac0803474125244042f84d64dad2d1362f190734b3cd2b2d4340d7128b35 Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.484313 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.492551 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kfwsl"] Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.537147 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95","Type":"ContainerStarted","Data":"b676b9f3e241789bdc5fe601c3a249dc871c4b34599754deaa2c8ead95e17f07"} Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.541432 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7fac1a35-2303-42d7-b27b-410ecff1b89a","Type":"ContainerStarted","Data":"0635c2263e6920e46260d89e8f63c1856a28d36e451f5b0f65a6bac32eeebb4c"} Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.544440 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"abaa26fa-f1a8-4249-8179-ad1b64334be5","Type":"ContainerStarted","Data":"d80d5b32fe143dea2bc0a906897039ef49967c47b4e2e5aecc103dcdd8c8fe02"} Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.547603 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"981d9260-fe05-4c33-9c46-c65a7a31c7b1","Type":"ContainerStarted","Data":"f1a03882c27790da090091bc58b06bd6ad18fea24a3760bad6170e22be05d70b"} Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.553188 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"95d30530-9d52-442a-94e0-3e85871f0c4f","Type":"ContainerStarted","Data":"8b17fa81d9f3f51343e20ad0d5632d85e2947fe2dffbc914adc1ccc25dd6e770"} Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.554891 5045 generic.go:334] "Generic (PLEG): container finished" podID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" 
containerID="25f12c8c60d7183ea4a9b20f90847553401561de4f2f6b64b22ac635f6cdbf03" exitCode=0 Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.554933 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" event={"ID":"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873","Type":"ContainerDied","Data":"25f12c8c60d7183ea4a9b20f90847553401561de4f2f6b64b22ac635f6cdbf03"} Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.556209 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cf25e150-e63e-4987-89ce-6e1419e00e88","Type":"ContainerStarted","Data":"5840ac0803474125244042f84d64dad2d1362f190734b3cd2b2d4340d7128b35"} Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.558558 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl" event={"ID":"efb42386-0a5d-423f-b31e-13e9433271ba","Type":"ContainerStarted","Data":"d94e3375df71f199f7c3f7ac545c2a551c54dbecdf521858e02604a42ee1d777"} Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.572680 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.701825 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-z2vw6"] Nov 25 23:16:38 crc kubenswrapper[5045]: W1125 23:16:38.709206 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1765d304_e95f_43d5_9655_84e468fe332e.slice/crio-aeffd11085c6cb433f7eae7f56182ed6b687f36021e8de887625213a47c1b612 WatchSource:0}: Error finding container aeffd11085c6cb433f7eae7f56182ed6b687f36021e8de887625213a47c1b612: Status 404 returned error can't find the container with id aeffd11085c6cb433f7eae7f56182ed6b687f36021e8de887625213a47c1b612 Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.870466 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.918703 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-config\") pod \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.918770 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhp87\" (UniqueName: \"kubernetes.io/projected/a414eff5-9a43-4ada-a8f2-a5a4451f3097-kube-api-access-rhp87\") pod \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.918817 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-dns-svc\") pod \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\" (UID: \"a414eff5-9a43-4ada-a8f2-a5a4451f3097\") " Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.919145 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-config" (OuterVolumeSpecName: "config") pod "a414eff5-9a43-4ada-a8f2-a5a4451f3097" (UID: "a414eff5-9a43-4ada-a8f2-a5a4451f3097"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.919304 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.919572 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a414eff5-9a43-4ada-a8f2-a5a4451f3097" (UID: "a414eff5-9a43-4ada-a8f2-a5a4451f3097"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:16:38 crc kubenswrapper[5045]: I1125 23:16:38.924319 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a414eff5-9a43-4ada-a8f2-a5a4451f3097-kube-api-access-rhp87" (OuterVolumeSpecName: "kube-api-access-rhp87") pod "a414eff5-9a43-4ada-a8f2-a5a4451f3097" (UID: "a414eff5-9a43-4ada-a8f2-a5a4451f3097"). InnerVolumeSpecName "kube-api-access-rhp87". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.024786 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a414eff5-9a43-4ada-a8f2-a5a4451f3097-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.024836 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhp87\" (UniqueName: \"kubernetes.io/projected/a414eff5-9a43-4ada-a8f2-a5a4451f3097-kube-api-access-rhp87\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.031342 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.125534 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlfxg\" (UniqueName: \"kubernetes.io/projected/96861537-7a24-4321-a52a-bfbdadaa396a-kube-api-access-xlfxg\") pod \"96861537-7a24-4321-a52a-bfbdadaa396a\" (UID: \"96861537-7a24-4321-a52a-bfbdadaa396a\") " Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.126033 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96861537-7a24-4321-a52a-bfbdadaa396a-config\") pod \"96861537-7a24-4321-a52a-bfbdadaa396a\" (UID: \"96861537-7a24-4321-a52a-bfbdadaa396a\") " Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.126785 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96861537-7a24-4321-a52a-bfbdadaa396a-config" (OuterVolumeSpecName: "config") pod "96861537-7a24-4321-a52a-bfbdadaa396a" (UID: "96861537-7a24-4321-a52a-bfbdadaa396a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.127200 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96861537-7a24-4321-a52a-bfbdadaa396a-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.130951 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96861537-7a24-4321-a52a-bfbdadaa396a-kube-api-access-xlfxg" (OuterVolumeSpecName: "kube-api-access-xlfxg") pod "96861537-7a24-4321-a52a-bfbdadaa396a" (UID: "96861537-7a24-4321-a52a-bfbdadaa396a"). InnerVolumeSpecName "kube-api-access-xlfxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.217837 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-wrcbr"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.219026 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.223668 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.229059 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlfxg\" (UniqueName: \"kubernetes.io/projected/96861537-7a24-4321-a52a-bfbdadaa396a-kube-api-access-xlfxg\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.232761 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wrcbr"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.330194 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a5ad3a64-612a-442a-beed-2dcf6303b974-ovn-rundir\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.330284 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ad3a64-612a-442a-beed-2dcf6303b974-config\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.330456 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5ad3a64-612a-442a-beed-2dcf6303b974-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.330510 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a5ad3a64-612a-442a-beed-2dcf6303b974-ovs-rundir\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.330652 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a5ad3a64-612a-442a-beed-2dcf6303b974-combined-ca-bundle\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.330668 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdkwt\" (UniqueName: \"kubernetes.io/projected/a5ad3a64-612a-442a-beed-2dcf6303b974-kube-api-access-jdkwt\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.381728 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qqh5j"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.415191 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-w8rr5"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.424299 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.426263 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.431760 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.431838 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ad3a64-612a-442a-beed-2dcf6303b974-config\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.431862 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.431883 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-config\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.431922 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5ad3a64-612a-442a-beed-2dcf6303b974-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.431949 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7pb2\" (UniqueName: 
\"kubernetes.io/projected/a2aacdc6-eb97-4f00-8f5d-8352ca694351-kube-api-access-q7pb2\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.431968 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a5ad3a64-612a-442a-beed-2dcf6303b974-ovs-rundir\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.432020 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5ad3a64-612a-442a-beed-2dcf6303b974-combined-ca-bundle\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.432035 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdkwt\" (UniqueName: \"kubernetes.io/projected/a5ad3a64-612a-442a-beed-2dcf6303b974-kube-api-access-jdkwt\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.432070 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a5ad3a64-612a-442a-beed-2dcf6303b974-ovn-rundir\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.432338 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a5ad3a64-612a-442a-beed-2dcf6303b974-ovn-rundir\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.433781 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ad3a64-612a-442a-beed-2dcf6303b974-config\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.434350 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-w8rr5"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.434861 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a5ad3a64-612a-442a-beed-2dcf6303b974-ovs-rundir\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.437662 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5ad3a64-612a-442a-beed-2dcf6303b974-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.446233 5045 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5ad3a64-612a-442a-beed-2dcf6303b974-combined-ca-bundle\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.455499 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdkwt\" (UniqueName: \"kubernetes.io/projected/a5ad3a64-612a-442a-beed-2dcf6303b974-kube-api-access-jdkwt\") pod \"ovn-controller-metrics-wrcbr\" (UID: \"a5ad3a64-612a-442a-beed-2dcf6303b974\") " pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: W1125 23:16:39.464531 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f0b6c93_84c1_4b4a_8d1d_844d035fe867.slice/crio-ef22aaee7e3103bfbad872c61d200a3fa9dcc6be376c9858eb9a2979918554f0 WatchSource:0}: Error finding container ef22aaee7e3103bfbad872c61d200a3fa9dcc6be376c9858eb9a2979918554f0: Status 404 returned error can't find the container with id ef22aaee7e3103bfbad872c61d200a3fa9dcc6be376c9858eb9a2979918554f0 Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.466928 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.535925 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7pb2\" (UniqueName: \"kubernetes.io/projected/a2aacdc6-eb97-4f00-8f5d-8352ca694351-kube-api-access-q7pb2\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.536042 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.536080 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.536099 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-config\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.537591 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.537599 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-config\") pod 
\"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.538252 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.554738 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7pb2\" (UniqueName: \"kubernetes.io/projected/a2aacdc6-eb97-4f00-8f5d-8352ca694351-kube-api-access-q7pb2\") pod \"dnsmasq-dns-6bc7876d45-w8rr5\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.570481 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3f0b6c93-84c1-4b4a-8d1d-844d035fe867","Type":"ContainerStarted","Data":"ef22aaee7e3103bfbad872c61d200a3fa9dcc6be376c9858eb9a2979918554f0"} Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.574303 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" event={"ID":"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873","Type":"ContainerStarted","Data":"a671297a5222813f19618fa5e03e8b8f677fa8ac1d96487e0e9b8491092d2413"} Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.574456 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.577874 5045 generic.go:334] "Generic (PLEG): container finished" podID="e35b7a88-26cb-4997-a934-934d15977a6b" containerID="6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1" exitCode=0 Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.577903 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" event={"ID":"e35b7a88-26cb-4997-a934-934d15977a6b","Type":"ContainerDied","Data":"6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1"} Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.579534 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.579534 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-q5hp8" event={"ID":"a414eff5-9a43-4ada-a8f2-a5a4451f3097","Type":"ContainerDied","Data":"2203d3c2ebfd9d59511e952a039f6ba056d792165319f4f7770cc3131fbf1198"} Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.581693 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-z2vw6" event={"ID":"1765d304-e95f-43d5-9655-84e468fe332e","Type":"ContainerStarted","Data":"aeffd11085c6cb433f7eae7f56182ed6b687f36021e8de887625213a47c1b612"} Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.584868 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"dca2a357-2bb3-4400-a74b-5ec428e7a710","Type":"ContainerStarted","Data":"05d2bae2bcd427c2c83875c8ad670b76cd858d69730a7133c6c363c5dbfb95c0"} Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.586434 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" event={"ID":"96861537-7a24-4321-a52a-bfbdadaa396a","Type":"ContainerDied","Data":"f96a7b81526c440b98fb4ee9fcaf55135dfef631fec1fbc21f2a46d1e0f36224"} Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.586529 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-9w8jb" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.592111 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wrcbr" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.599799 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" podStartSLOduration=3.6036524979999998 podStartE2EDuration="17.599780648s" podCreationTimestamp="2025-11-25 23:16:22 +0000 UTC" firstStartedPulling="2025-11-25 23:16:23.846572576 +0000 UTC m=+1040.204231688" lastFinishedPulling="2025-11-25 23:16:37.842700736 +0000 UTC m=+1054.200359838" observedRunningTime="2025-11-25 23:16:39.594693885 +0000 UTC m=+1055.952353007" watchObservedRunningTime="2025-11-25 23:16:39.599780648 +0000 UTC m=+1055.957439750" Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.655436 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9w8jb"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.664128 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9w8jb"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.691922 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-q5hp8"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.704899 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-q5hp8"] Nov 25 23:16:39 crc kubenswrapper[5045]: I1125 23:16:39.756686 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:40 crc kubenswrapper[5045]: I1125 23:16:40.405201 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96861537-7a24-4321-a52a-bfbdadaa396a" path="/var/lib/kubelet/pods/96861537-7a24-4321-a52a-bfbdadaa396a/volumes" Nov 25 23:16:40 crc kubenswrapper[5045]: I1125 23:16:40.405742 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a414eff5-9a43-4ada-a8f2-a5a4451f3097" path="/var/lib/kubelet/pods/a414eff5-9a43-4ada-a8f2-a5a4451f3097/volumes" Nov 25 23:16:40 crc kubenswrapper[5045]: I1125 23:16:40.594627 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" podUID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" containerName="dnsmasq-dns" containerID="cri-o://a671297a5222813f19618fa5e03e8b8f677fa8ac1d96487e0e9b8491092d2413" gracePeriod=10 Nov 25 23:16:41 crc kubenswrapper[5045]: I1125 23:16:41.604224 5045 generic.go:334] "Generic (PLEG): container finished" podID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" containerID="a671297a5222813f19618fa5e03e8b8f677fa8ac1d96487e0e9b8491092d2413" exitCode=0 Nov 25 23:16:41 crc kubenswrapper[5045]: I1125 23:16:41.604328 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" event={"ID":"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873","Type":"ContainerDied","Data":"a671297a5222813f19618fa5e03e8b8f677fa8ac1d96487e0e9b8491092d2413"} Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.398047 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.454670 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-config\") pod \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.455112 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvbjs\" (UniqueName: \"kubernetes.io/projected/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-kube-api-access-qvbjs\") pod \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.455180 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-dns-svc\") pod \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\" (UID: \"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873\") " Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.465202 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-kube-api-access-qvbjs" (OuterVolumeSpecName: "kube-api-access-qvbjs") pod "80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" (UID: "80f7416b-a1d6-41a4-b92b-8d4ef6cc3873"). InnerVolumeSpecName "kube-api-access-qvbjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.512617 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" (UID: "80f7416b-a1d6-41a4-b92b-8d4ef6cc3873"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.516317 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-config" (OuterVolumeSpecName: "config") pod "80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" (UID: "80f7416b-a1d6-41a4-b92b-8d4ef6cc3873"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.557748 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.557789 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvbjs\" (UniqueName: \"kubernetes.io/projected/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-kube-api-access-qvbjs\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.557844 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.633727 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" event={"ID":"80f7416b-a1d6-41a4-b92b-8d4ef6cc3873","Type":"ContainerDied","Data":"141c2ec38328dd741a94220816433d22d090fa7ca15b0ab33b9d845310423ef7"} Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.633778 5045 scope.go:117] "RemoveContainer" containerID="a671297a5222813f19618fa5e03e8b8f677fa8ac1d96487e0e9b8491092d2413" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.633814 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.680004 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qqh5j"] Nov 25 23:16:45 crc kubenswrapper[5045]: I1125 23:16:45.687066 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qqh5j"] Nov 25 23:16:46 crc kubenswrapper[5045]: I1125 23:16:46.240458 5045 scope.go:117] "RemoveContainer" containerID="25f12c8c60d7183ea4a9b20f90847553401561de4f2f6b64b22ac635f6cdbf03" Nov 25 23:16:46 crc kubenswrapper[5045]: I1125 23:16:46.457443 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" path="/var/lib/kubelet/pods/80f7416b-a1d6-41a4-b92b-8d4ef6cc3873/volumes" Nov 25 23:16:46 crc kubenswrapper[5045]: I1125 23:16:46.578951 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wrcbr"] Nov 25 23:16:46 crc kubenswrapper[5045]: I1125 23:16:46.641276 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wrcbr" event={"ID":"a5ad3a64-612a-442a-beed-2dcf6303b974","Type":"ContainerStarted","Data":"c514b3333d478619cefe15d91ff0ae4fe926b6925a4bc3110d481300102901e0"} Nov 25 23:16:46 crc kubenswrapper[5045]: I1125 23:16:46.645897 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" event={"ID":"e35b7a88-26cb-4997-a934-934d15977a6b","Type":"ContainerStarted","Data":"86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f"} Nov 25 23:16:46 crc kubenswrapper[5045]: I1125 23:16:46.646897 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:46 crc kubenswrapper[5045]: I1125 23:16:46.666687 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" podStartSLOduration=-9223372012.188107 podStartE2EDuration="24.666669063s" podCreationTimestamp="2025-11-25 23:16:22 +0000 UTC" firstStartedPulling="2025-11-25 23:16:23.50581083 +0000 UTC m=+1039.863469942" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:16:46.66550042 +0000 UTC m=+1063.023159542" watchObservedRunningTime="2025-11-25 23:16:46.666669063 +0000 UTC m=+1063.024328175" Nov 25 23:16:46 crc kubenswrapper[5045]: I1125 23:16:46.710117 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-w8rr5"] Nov 25 23:16:47 crc kubenswrapper[5045]: I1125 23:16:47.658528 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"abaa26fa-f1a8-4249-8179-ad1b64334be5","Type":"ContainerStarted","Data":"e5a6c02f1bde72528e110d4f49ec9d763213cf5bb5845629522f1ce30658afa2"} Nov 25 23:16:47 crc kubenswrapper[5045]: I1125 23:16:47.660017 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" event={"ID":"a2aacdc6-eb97-4f00-8f5d-8352ca694351","Type":"ContainerStarted","Data":"8b73eba8f708d7dde496226eb31063c10adcd55d9c374bca6b0fd7f9e944212b"} Nov 25 23:16:47 crc kubenswrapper[5045]: I1125 23:16:47.662117 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"95d30530-9d52-442a-94e0-3e85871f0c4f","Type":"ContainerStarted","Data":"3bd563928c2a5679447c5e878ef5e45e58fbf1368f742df5d7197776ef751ba5"} Nov 25 23:16:47 crc kubenswrapper[5045]: I1125 23:16:47.662179 
5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 23:16:47 crc kubenswrapper[5045]: I1125 23:16:47.701611 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=12.915743208 podStartE2EDuration="20.701591024s" podCreationTimestamp="2025-11-25 23:16:27 +0000 UTC" firstStartedPulling="2025-11-25 23:16:38.233899943 +0000 UTC m=+1054.591559065" lastFinishedPulling="2025-11-25 23:16:46.019747769 +0000 UTC m=+1062.377406881" observedRunningTime="2025-11-25 23:16:47.698547339 +0000 UTC m=+1064.056206451" watchObservedRunningTime="2025-11-25 23:16:47.701591024 +0000 UTC m=+1064.059250136" Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.247959 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-qqh5j" podUID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.98:5353: i/o timeout" Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.669969 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl" event={"ID":"efb42386-0a5d-423f-b31e-13e9433271ba","Type":"ContainerStarted","Data":"cd89513eb06f2b8e4674a565e182bd9c36cf86db6a3525d252a3eceb0bdcd6aa"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.670238 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-kfwsl" Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.672417 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7fac1a35-2303-42d7-b27b-410ecff1b89a","Type":"ContainerStarted","Data":"08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.674539 5045 generic.go:334] "Generic (PLEG): container finished" podID="1765d304-e95f-43d5-9655-84e468fe332e" containerID="4a7e6216d745297b05cae6b0515be223535e870c9a6d70ecf44eedf4c1b106cd" exitCode=0 Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.674593 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-z2vw6" event={"ID":"1765d304-e95f-43d5-9655-84e468fe332e","Type":"ContainerDied","Data":"4a7e6216d745297b05cae6b0515be223535e870c9a6d70ecf44eedf4c1b106cd"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.676861 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"dca2a357-2bb3-4400-a74b-5ec428e7a710","Type":"ContainerStarted","Data":"54d1d74037b60276b5605f4e10ab11a6eee432739a9010a13cd7d900723b7a67"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.678614 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3f0b6c93-84c1-4b4a-8d1d-844d035fe867","Type":"ContainerStarted","Data":"e48948012ebfe7d7b3d3d3a643a97d8482c186f813e0ac487cd8fdc8d018e2ea"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.680296 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95","Type":"ContainerStarted","Data":"c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.684619 5045 generic.go:334] "Generic (PLEG): container finished" podID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerID="f721dc234fc13c223a2c6ef192fcdf4ed2a41f841ff843ada71b3ea3ce22962b" exitCode=0 Nov 25 23:16:48 crc 
kubenswrapper[5045]: I1125 23:16:48.684680 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" event={"ID":"a2aacdc6-eb97-4f00-8f5d-8352ca694351","Type":"ContainerDied","Data":"f721dc234fc13c223a2c6ef192fcdf4ed2a41f841ff843ada71b3ea3ce22962b"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.689136 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-kfwsl" podStartSLOduration=8.932223875 podStartE2EDuration="16.68912126s" podCreationTimestamp="2025-11-25 23:16:32 +0000 UTC" firstStartedPulling="2025-11-25 23:16:38.509785924 +0000 UTC m=+1054.867445036" lastFinishedPulling="2025-11-25 23:16:46.266683289 +0000 UTC m=+1062.624342421" observedRunningTime="2025-11-25 23:16:48.685337355 +0000 UTC m=+1065.042996467" watchObservedRunningTime="2025-11-25 23:16:48.68912126 +0000 UTC m=+1065.046780372" Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.694441 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"981d9260-fe05-4c33-9c46-c65a7a31c7b1","Type":"ContainerStarted","Data":"f2c2b1bc10cc76fb0d525ba4c99f87b1afcbf140c4df08987fba2fef8b239a58"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.697277 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cf25e150-e63e-4987-89ce-6e1419e00e88","Type":"ContainerStarted","Data":"65339fe1e0a8b29264bb776769c1925a8ecb345e0e20c66f76f46626b28194c4"} Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.697856 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 23:16:48 crc kubenswrapper[5045]: I1125 23:16:48.802182 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=12.069370608 podStartE2EDuration="20.802161224s" podCreationTimestamp="2025-11-25 23:16:28 +0000 UTC" firstStartedPulling="2025-11-25 23:16:38.485902016 +0000 UTC m=+1054.843561138" lastFinishedPulling="2025-11-25 23:16:47.218692621 +0000 UTC m=+1063.576351754" observedRunningTime="2025-11-25 23:16:48.774386237 +0000 UTC m=+1065.132045349" watchObservedRunningTime="2025-11-25 23:16:48.802161224 +0000 UTC m=+1065.159820336" Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.719423 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-z2vw6" event={"ID":"1765d304-e95f-43d5-9655-84e468fe332e","Type":"ContainerStarted","Data":"339564a49f2d37191a2dd73cd99f935e48df2b823a6ae071445b0ea56abded20"} Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.720334 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-z2vw6" event={"ID":"1765d304-e95f-43d5-9655-84e468fe332e","Type":"ContainerStarted","Data":"0f037fda45d17c71dbaa0f235ffc0b922e9e735087d482b83d07b76469bccdef"} Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.720381 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.720407 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.722342 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"dca2a357-2bb3-4400-a74b-5ec428e7a710","Type":"ContainerStarted","Data":"e039ce81e566f24a9799b92dd9c8853f35a9c20f5dc45117a138c0ba0255a7a9"} Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.731082 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" event={"ID":"a2aacdc6-eb97-4f00-8f5d-8352ca694351","Type":"ContainerStarted","Data":"e02aedf5b2290c84688e281cddcbe39632af745a90e07c96ea2b2cd9d8788502"} Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.731211 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.735179 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3f0b6c93-84c1-4b4a-8d1d-844d035fe867","Type":"ContainerStarted","Data":"89522c95f2f6831afd822c97b2546337b7e40f9ad8012a5a64b5265c53dd1562"} Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.738985 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wrcbr" event={"ID":"a5ad3a64-612a-442a-beed-2dcf6303b974","Type":"ContainerStarted","Data":"1ba9541a2e545053c04de13a3662c4ad24abea9964866c3d865878f61589367f"} Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.761484 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-z2vw6" podStartSLOduration=11.377322632 podStartE2EDuration="18.761456865s" podCreationTimestamp="2025-11-25 23:16:32 +0000 UTC" firstStartedPulling="2025-11-25 23:16:38.712133897 +0000 UTC m=+1055.069793009" lastFinishedPulling="2025-11-25 23:16:46.09626812 +0000 UTC m=+1062.453927242" observedRunningTime="2025-11-25 23:16:50.751081975 +0000 UTC m=+1067.108741107" watchObservedRunningTime="2025-11-25 23:16:50.761456865 +0000 UTC m=+1067.119116017" Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.786385 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=6.382286912 podStartE2EDuration="17.786359982s" podCreationTimestamp="2025-11-25 23:16:33 +0000 UTC" firstStartedPulling="2025-11-25 23:16:38.58287801 +0000 UTC m=+1054.940537122" lastFinishedPulling="2025-11-25 23:16:49.98695108 +0000 UTC m=+1066.344610192" observedRunningTime="2025-11-25 23:16:50.776503846 +0000 UTC m=+1067.134162958" watchObservedRunningTime="2025-11-25 23:16:50.786359982 +0000 UTC m=+1067.144019104" Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.796070 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-wrcbr" podStartSLOduration=8.448370688 podStartE2EDuration="11.796052613s" podCreationTimestamp="2025-11-25 23:16:39 +0000 UTC" firstStartedPulling="2025-11-25 23:16:46.633589207 +0000 UTC m=+1062.991248329" lastFinishedPulling="2025-11-25 23:16:49.981271132 +0000 UTC m=+1066.338930254" observedRunningTime="2025-11-25 23:16:50.794260513 +0000 UTC m=+1067.151919615" watchObservedRunningTime="2025-11-25 23:16:50.796052613 +0000 UTC m=+1067.153711725" Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.841160 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=6.294747137 podStartE2EDuration="16.841139165s" podCreationTimestamp="2025-11-25 23:16:34 +0000 UTC" firstStartedPulling="2025-11-25 23:16:39.466168139 +0000 UTC m=+1055.823827251" lastFinishedPulling="2025-11-25 23:16:50.012560157 +0000 
UTC m=+1066.370219279" observedRunningTime="2025-11-25 23:16:50.840496097 +0000 UTC m=+1067.198155239" watchObservedRunningTime="2025-11-25 23:16:50.841139165 +0000 UTC m=+1067.198798277" Nov 25 23:16:50 crc kubenswrapper[5045]: I1125 23:16:50.870803 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" podStartSLOduration=11.870780925 podStartE2EDuration="11.870780925s" podCreationTimestamp="2025-11-25 23:16:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:16:50.869677884 +0000 UTC m=+1067.227337036" watchObservedRunningTime="2025-11-25 23:16:50.870780925 +0000 UTC m=+1067.228440037" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.167671 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.167768 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.173666 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-4xqdk"] Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.173906 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" podUID="e35b7a88-26cb-4997-a934-934d15977a6b" containerName="dnsmasq-dns" containerID="cri-o://86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f" gracePeriod=10 Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.178941 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.204647 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-n2lvl"] Nov 25 23:16:51 crc kubenswrapper[5045]: E1125 23:16:51.205003 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" containerName="init" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.205021 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" containerName="init" Nov 25 23:16:51 crc kubenswrapper[5045]: E1125 23:16:51.205047 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" containerName="dnsmasq-dns" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.205054 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" containerName="dnsmasq-dns" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.206076 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="80f7416b-a1d6-41a4-b92b-8d4ef6cc3873" containerName="dnsmasq-dns" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.207057 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.209564 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.228327 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n2lvl"] Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.229804 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.355791 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.356118 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-dns-svc\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.356141 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lvxz\" (UniqueName: \"kubernetes.io/projected/e88e13a0-7cae-4c31-86c0-19526b581713-kube-api-access-7lvxz\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.356186 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.356235 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-config\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.461741 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.461783 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-dns-svc\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.461807 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lvxz\" (UniqueName: 
\"kubernetes.io/projected/e88e13a0-7cae-4c31-86c0-19526b581713-kube-api-access-7lvxz\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.461866 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.461918 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-config\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.462802 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-config\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.463331 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.465053 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.465241 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-dns-svc\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.482801 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lvxz\" (UniqueName: \"kubernetes.io/projected/e88e13a0-7cae-4c31-86c0-19526b581713-kube-api-access-7lvxz\") pod \"dnsmasq-dns-8554648995-n2lvl\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.523496 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.644696 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.749844 5045 generic.go:334] "Generic (PLEG): container finished" podID="e35b7a88-26cb-4997-a934-934d15977a6b" containerID="86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f" exitCode=0 Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.749923 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" event={"ID":"e35b7a88-26cb-4997-a934-934d15977a6b","Type":"ContainerDied","Data":"86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f"} Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.750276 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" event={"ID":"e35b7a88-26cb-4997-a934-934d15977a6b","Type":"ContainerDied","Data":"f10e0f173a7089fc9e6da76b81ff3667745ac921c30d374a01de41f20d5153b8"} Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.750296 5045 scope.go:117] "RemoveContainer" containerID="86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.749933 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-4xqdk" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.754127 5045 generic.go:334] "Generic (PLEG): container finished" podID="abaa26fa-f1a8-4249-8179-ad1b64334be5" containerID="e5a6c02f1bde72528e110d4f49ec9d763213cf5bb5845629522f1ce30658afa2" exitCode=0 Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.754198 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"abaa26fa-f1a8-4249-8179-ad1b64334be5","Type":"ContainerDied","Data":"e5a6c02f1bde72528e110d4f49ec9d763213cf5bb5845629522f1ce30658afa2"} Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.757179 5045 generic.go:334] "Generic (PLEG): container finished" podID="981d9260-fe05-4c33-9c46-c65a7a31c7b1" containerID="f2c2b1bc10cc76fb0d525ba4c99f87b1afcbf140c4df08987fba2fef8b239a58" exitCode=0 Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.757209 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"981d9260-fe05-4c33-9c46-c65a7a31c7b1","Type":"ContainerDied","Data":"f2c2b1bc10cc76fb0d525ba4c99f87b1afcbf140c4df08987fba2fef8b239a58"} Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.769387 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-config\") pod \"e35b7a88-26cb-4997-a934-934d15977a6b\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.769414 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-dns-svc\") pod \"e35b7a88-26cb-4997-a934-934d15977a6b\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.769620 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt7bh\" (UniqueName: \"kubernetes.io/projected/e35b7a88-26cb-4997-a934-934d15977a6b-kube-api-access-vt7bh\") pod \"e35b7a88-26cb-4997-a934-934d15977a6b\" (UID: \"e35b7a88-26cb-4997-a934-934d15977a6b\") " Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.773486 5045 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e35b7a88-26cb-4997-a934-934d15977a6b-kube-api-access-vt7bh" (OuterVolumeSpecName: "kube-api-access-vt7bh") pod "e35b7a88-26cb-4997-a934-934d15977a6b" (UID: "e35b7a88-26cb-4997-a934-934d15977a6b"). InnerVolumeSpecName "kube-api-access-vt7bh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.778517 5045 scope.go:117] "RemoveContainer" containerID="6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.825570 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e35b7a88-26cb-4997-a934-934d15977a6b" (UID: "e35b7a88-26cb-4997-a934-934d15977a6b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.827263 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-config" (OuterVolumeSpecName: "config") pod "e35b7a88-26cb-4997-a934-934d15977a6b" (UID: "e35b7a88-26cb-4997-a934-934d15977a6b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.868067 5045 scope.go:117] "RemoveContainer" containerID="86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f" Nov 25 23:16:51 crc kubenswrapper[5045]: E1125 23:16:51.868654 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f\": container with ID starting with 86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f not found: ID does not exist" containerID="86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.868684 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f"} err="failed to get container status \"86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f\": rpc error: code = NotFound desc = could not find container \"86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f\": container with ID starting with 86d4430ada405e8f5713efb7965f1d489e48dc6e4e6ffca7738ae462bd10838f not found: ID does not exist" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.868718 5045 scope.go:117] "RemoveContainer" containerID="6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1" Nov 25 23:16:51 crc kubenswrapper[5045]: E1125 23:16:51.869138 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1\": container with ID starting with 6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1 not found: ID does not exist" containerID="6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.869236 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1"} err="failed to get container 
status \"6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1\": rpc error: code = NotFound desc = could not find container \"6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1\": container with ID starting with 6e10e108147974763e776c68d1ff6d8e500d0d8615fce4aaa3617542c7cf07a1 not found: ID does not exist" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.871514 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt7bh\" (UniqueName: \"kubernetes.io/projected/e35b7a88-26cb-4997-a934-934d15977a6b-kube-api-access-vt7bh\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.871553 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.871568 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e35b7a88-26cb-4997-a934-934d15977a6b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:16:51 crc kubenswrapper[5045]: I1125 23:16:51.992555 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n2lvl"] Nov 25 23:16:51 crc kubenswrapper[5045]: W1125 23:16:51.995063 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode88e13a0_7cae_4c31_86c0_19526b581713.slice/crio-31340781b71139b00284718cd78940e945c92beebba989b768167832215f346b WatchSource:0}: Error finding container 31340781b71139b00284718cd78940e945c92beebba989b768167832215f346b: Status 404 returned error can't find the container with id 31340781b71139b00284718cd78940e945c92beebba989b768167832215f346b Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.102145 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-4xqdk"] Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.106470 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-4xqdk"] Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.407858 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e35b7a88-26cb-4997-a934-934d15977a6b" path="/var/lib/kubelet/pods/e35b7a88-26cb-4997-a934-934d15977a6b/volumes" Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.563872 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.767237 5045 generic.go:334] "Generic (PLEG): container finished" podID="e88e13a0-7cae-4c31-86c0-19526b581713" containerID="d068bc3a96f8d4fb768f6ed6b046dc1daa0fe387a379eaa01fdbd15f1e23e5c5" exitCode=0 Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.767542 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n2lvl" event={"ID":"e88e13a0-7cae-4c31-86c0-19526b581713","Type":"ContainerDied","Data":"d068bc3a96f8d4fb768f6ed6b046dc1daa0fe387a379eaa01fdbd15f1e23e5c5"} Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.767572 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n2lvl" event={"ID":"e88e13a0-7cae-4c31-86c0-19526b581713","Type":"ContainerStarted","Data":"31340781b71139b00284718cd78940e945c92beebba989b768167832215f346b"} Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.771846 5045 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"abaa26fa-f1a8-4249-8179-ad1b64334be5","Type":"ContainerStarted","Data":"2b3d64e3100c693bc9c08cf464442fa1bb0ba1f57c58704a69eb4557ed3f41dd"} Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.776357 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"981d9260-fe05-4c33-9c46-c65a7a31c7b1","Type":"ContainerStarted","Data":"f923deb7f974974adf636b399e616c935dee2f11c9eddb165ed6edb812471abe"} Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.837442 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=20.101595785 podStartE2EDuration="27.837255116s" podCreationTimestamp="2025-11-25 23:16:25 +0000 UTC" firstStartedPulling="2025-11-25 23:16:38.361503764 +0000 UTC m=+1054.719162886" lastFinishedPulling="2025-11-25 23:16:46.097163095 +0000 UTC m=+1062.454822217" observedRunningTime="2025-11-25 23:16:52.823573553 +0000 UTC m=+1069.181232665" watchObservedRunningTime="2025-11-25 23:16:52.837255116 +0000 UTC m=+1069.194914238" Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.852208 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.869947 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=20.666595921 podStartE2EDuration="28.86993327s" podCreationTimestamp="2025-11-25 23:16:24 +0000 UTC" firstStartedPulling="2025-11-25 23:16:38.350235889 +0000 UTC m=+1054.707895001" lastFinishedPulling="2025-11-25 23:16:46.553573238 +0000 UTC m=+1062.911232350" observedRunningTime="2025-11-25 23:16:52.857366248 +0000 UTC m=+1069.215025360" watchObservedRunningTime="2025-11-25 23:16:52.86993327 +0000 UTC m=+1069.227592382" Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.911122 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:52 crc kubenswrapper[5045]: I1125 23:16:52.966700 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.789486 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n2lvl" event={"ID":"e88e13a0-7cae-4c31-86c0-19526b581713","Type":"ContainerStarted","Data":"0defa8acef3dc500e2170b1690f5377efa0ca3e374a3512c3fcd405d10d412b7"} Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.789892 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.813621 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-n2lvl" podStartSLOduration=2.813604549 podStartE2EDuration="2.813604549s" podCreationTimestamp="2025-11-25 23:16:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:16:53.810635936 +0000 UTC m=+1070.168295058" watchObservedRunningTime="2025-11-25 23:16:53.813604549 +0000 UTC m=+1070.171263661" Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.845113 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.993151 5045 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 23:16:53 crc kubenswrapper[5045]: E1125 23:16:53.994516 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e35b7a88-26cb-4997-a934-934d15977a6b" containerName="dnsmasq-dns" Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.994549 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e35b7a88-26cb-4997-a934-934d15977a6b" containerName="dnsmasq-dns" Nov 25 23:16:53 crc kubenswrapper[5045]: E1125 23:16:53.994611 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e35b7a88-26cb-4997-a934-934d15977a6b" containerName="init" Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.994628 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e35b7a88-26cb-4997-a934-934d15977a6b" containerName="init" Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.994922 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="e35b7a88-26cb-4997-a934-934d15977a6b" containerName="dnsmasq-dns" Nov 25 23:16:53 crc kubenswrapper[5045]: I1125 23:16:53.996698 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:53.999601 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:53.999915 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.000046 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-8b5wf" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.000222 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.031410 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.114401 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-scripts\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.114457 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.114538 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.114580 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c4td\" (UniqueName: \"kubernetes.io/projected/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-kube-api-access-2c4td\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 
23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.114608 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-config\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.114628 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.114747 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.216209 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.216294 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.216341 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c4td\" (UniqueName: \"kubernetes.io/projected/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-kube-api-access-2c4td\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.216368 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-config\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.216387 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.216417 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.216508 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-scripts\") pod \"ovn-northd-0\" (UID: 
\"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.217407 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.217591 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-scripts\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.218148 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-config\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.221872 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.227984 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.228827 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.239411 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c4td\" (UniqueName: \"kubernetes.io/projected/f7cd8ff8-57e1-4909-b7c7-93f707770aaa-kube-api-access-2c4td\") pod \"ovn-northd-0\" (UID: \"f7cd8ff8-57e1-4909-b7c7-93f707770aaa\") " pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.319141 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.782285 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 23:16:54 crc kubenswrapper[5045]: W1125 23:16:54.784965 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7cd8ff8_57e1_4909_b7c7_93f707770aaa.slice/crio-6217f205ebd128949c5f30820cdb886afd97bd12188beab1dbc993a6944cec5b WatchSource:0}: Error finding container 6217f205ebd128949c5f30820cdb886afd97bd12188beab1dbc993a6944cec5b: Status 404 returned error can't find the container with id 6217f205ebd128949c5f30820cdb886afd97bd12188beab1dbc993a6944cec5b Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.820172 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f7cd8ff8-57e1-4909-b7c7-93f707770aaa","Type":"ContainerStarted","Data":"6217f205ebd128949c5f30820cdb886afd97bd12188beab1dbc993a6944cec5b"} Nov 25 23:16:54 crc kubenswrapper[5045]: I1125 23:16:54.820803 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:16:54 crc kubenswrapper[5045]: E1125 23:16:54.895234 5045 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.89:35730->38.102.83.89:38953: write tcp 38.102.83.89:35730->38.102.83.89:38953: write: broken pipe Nov 25 23:16:55 crc kubenswrapper[5045]: I1125 23:16:55.924446 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 23:16:55 crc kubenswrapper[5045]: I1125 23:16:55.924774 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 23:16:56 crc kubenswrapper[5045]: I1125 23:16:56.836100 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f7cd8ff8-57e1-4909-b7c7-93f707770aaa","Type":"ContainerStarted","Data":"a801e85be3f24261e567013398c202aabc26c20452a4e3d4b5258d54aa740dd3"} Nov 25 23:16:56 crc kubenswrapper[5045]: I1125 23:16:56.836377 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 23:16:56 crc kubenswrapper[5045]: I1125 23:16:56.836392 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f7cd8ff8-57e1-4909-b7c7-93f707770aaa","Type":"ContainerStarted","Data":"9da15d10f13909d45106aa750ea3aa961bb7c4df1aec61d18f44271134ffa836"} Nov 25 23:16:56 crc kubenswrapper[5045]: I1125 23:16:56.854004 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.841681754 podStartE2EDuration="3.853981093s" podCreationTimestamp="2025-11-25 23:16:53 +0000 UTC" firstStartedPulling="2025-11-25 23:16:54.787834902 +0000 UTC m=+1071.145494024" lastFinishedPulling="2025-11-25 23:16:55.800134251 +0000 UTC m=+1072.157793363" observedRunningTime="2025-11-25 23:16:56.85244108 +0000 UTC m=+1073.210100202" watchObservedRunningTime="2025-11-25 23:16:56.853981093 +0000 UTC m=+1073.211640245" Nov 25 23:16:57 crc kubenswrapper[5045]: I1125 23:16:57.514208 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:57 crc kubenswrapper[5045]: I1125 23:16:57.514675 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:57 
crc kubenswrapper[5045]: I1125 23:16:57.621054 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:57 crc kubenswrapper[5045]: I1125 23:16:57.921992 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 25 23:16:59 crc kubenswrapper[5045]: I1125 23:16:59.002931 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 23:16:59 crc kubenswrapper[5045]: I1125 23:16:59.758640 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:17:00 crc kubenswrapper[5045]: I1125 23:17:00.157358 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 23:17:00 crc kubenswrapper[5045]: I1125 23:17:00.244696 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 23:17:01 crc kubenswrapper[5045]: I1125 23:17:01.526155 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:17:01 crc kubenswrapper[5045]: I1125 23:17:01.590346 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-w8rr5"] Nov 25 23:17:01 crc kubenswrapper[5045]: I1125 23:17:01.590614 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" podUID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerName="dnsmasq-dns" containerID="cri-o://e02aedf5b2290c84688e281cddcbe39632af745a90e07c96ea2b2cd9d8788502" gracePeriod=10 Nov 25 23:17:01 crc kubenswrapper[5045]: I1125 23:17:01.877932 5045 generic.go:334] "Generic (PLEG): container finished" podID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerID="e02aedf5b2290c84688e281cddcbe39632af745a90e07c96ea2b2cd9d8788502" exitCode=0 Nov 25 23:17:01 crc kubenswrapper[5045]: I1125 23:17:01.878028 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" event={"ID":"a2aacdc6-eb97-4f00-8f5d-8352ca694351","Type":"ContainerDied","Data":"e02aedf5b2290c84688e281cddcbe39632af745a90e07c96ea2b2cd9d8788502"} Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.792057 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-0776-account-create-update-v2jfl"] Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.793556 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.796026 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.806170 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0776-account-create-update-v2jfl"] Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.840005 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-d5x24"] Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.841093 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-d5x24" Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.846475 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-d5x24"] Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.970246 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4df595bc-d45d-4560-9203-c57daba13a4b-operator-scripts\") pod \"glance-db-create-d5x24\" (UID: \"4df595bc-d45d-4560-9203-c57daba13a4b\") " pod="openstack/glance-db-create-d5x24" Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.970309 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4r6j\" (UniqueName: \"kubernetes.io/projected/bc65709d-5a60-4967-a2fa-21df985f4f82-kube-api-access-j4r6j\") pod \"glance-0776-account-create-update-v2jfl\" (UID: \"bc65709d-5a60-4967-a2fa-21df985f4f82\") " pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.970625 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc65709d-5a60-4967-a2fa-21df985f4f82-operator-scripts\") pod \"glance-0776-account-create-update-v2jfl\" (UID: \"bc65709d-5a60-4967-a2fa-21df985f4f82\") " pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:02 crc kubenswrapper[5045]: I1125 23:17:02.970825 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvsts\" (UniqueName: \"kubernetes.io/projected/4df595bc-d45d-4560-9203-c57daba13a4b-kube-api-access-kvsts\") pod \"glance-db-create-d5x24\" (UID: \"4df595bc-d45d-4560-9203-c57daba13a4b\") " pod="openstack/glance-db-create-d5x24" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.072646 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4df595bc-d45d-4560-9203-c57daba13a4b-operator-scripts\") pod \"glance-db-create-d5x24\" (UID: \"4df595bc-d45d-4560-9203-c57daba13a4b\") " pod="openstack/glance-db-create-d5x24" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.072755 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4r6j\" (UniqueName: \"kubernetes.io/projected/bc65709d-5a60-4967-a2fa-21df985f4f82-kube-api-access-j4r6j\") pod \"glance-0776-account-create-update-v2jfl\" (UID: \"bc65709d-5a60-4967-a2fa-21df985f4f82\") " pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.072939 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc65709d-5a60-4967-a2fa-21df985f4f82-operator-scripts\") pod \"glance-0776-account-create-update-v2jfl\" (UID: \"bc65709d-5a60-4967-a2fa-21df985f4f82\") " pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.073039 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvsts\" (UniqueName: \"kubernetes.io/projected/4df595bc-d45d-4560-9203-c57daba13a4b-kube-api-access-kvsts\") pod \"glance-db-create-d5x24\" (UID: \"4df595bc-d45d-4560-9203-c57daba13a4b\") " pod="openstack/glance-db-create-d5x24" Nov 25 23:17:03 crc 
kubenswrapper[5045]: I1125 23:17:03.074338 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4df595bc-d45d-4560-9203-c57daba13a4b-operator-scripts\") pod \"glance-db-create-d5x24\" (UID: \"4df595bc-d45d-4560-9203-c57daba13a4b\") " pod="openstack/glance-db-create-d5x24" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.076972 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc65709d-5a60-4967-a2fa-21df985f4f82-operator-scripts\") pod \"glance-0776-account-create-update-v2jfl\" (UID: \"bc65709d-5a60-4967-a2fa-21df985f4f82\") " pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.106992 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4r6j\" (UniqueName: \"kubernetes.io/projected/bc65709d-5a60-4967-a2fa-21df985f4f82-kube-api-access-j4r6j\") pod \"glance-0776-account-create-update-v2jfl\" (UID: \"bc65709d-5a60-4967-a2fa-21df985f4f82\") " pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.108039 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvsts\" (UniqueName: \"kubernetes.io/projected/4df595bc-d45d-4560-9203-c57daba13a4b-kube-api-access-kvsts\") pod \"glance-db-create-d5x24\" (UID: \"4df595bc-d45d-4560-9203-c57daba13a4b\") " pod="openstack/glance-db-create-d5x24" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.114403 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.155092 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-d5x24" Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.588347 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0776-account-create-update-v2jfl"] Nov 25 23:17:03 crc kubenswrapper[5045]: W1125 23:17:03.595236 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc65709d_5a60_4967_a2fa_21df985f4f82.slice/crio-7992605fcbf95bc6638e16bd56a999f87fa4319c44a33f27e7a041d09ce11d47 WatchSource:0}: Error finding container 7992605fcbf95bc6638e16bd56a999f87fa4319c44a33f27e7a041d09ce11d47: Status 404 returned error can't find the container with id 7992605fcbf95bc6638e16bd56a999f87fa4319c44a33f27e7a041d09ce11d47 Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.726769 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-d5x24"] Nov 25 23:17:03 crc kubenswrapper[5045]: W1125 23:17:03.736017 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4df595bc_d45d_4560_9203_c57daba13a4b.slice/crio-0bb4b0f2fc4c7a4198808dc92451e523ada964fd31b21743aa3c2ff23d754f11 WatchSource:0}: Error finding container 0bb4b0f2fc4c7a4198808dc92451e523ada964fd31b21743aa3c2ff23d754f11: Status 404 returned error can't find the container with id 0bb4b0f2fc4c7a4198808dc92451e523ada964fd31b21743aa3c2ff23d754f11 Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.894898 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0776-account-create-update-v2jfl" event={"ID":"bc65709d-5a60-4967-a2fa-21df985f4f82","Type":"ContainerStarted","Data":"7992605fcbf95bc6638e16bd56a999f87fa4319c44a33f27e7a041d09ce11d47"} Nov 25 23:17:03 crc kubenswrapper[5045]: I1125 23:17:03.896822 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-d5x24" event={"ID":"4df595bc-d45d-4560-9203-c57daba13a4b","Type":"ContainerStarted","Data":"0bb4b0f2fc4c7a4198808dc92451e523ada964fd31b21743aa3c2ff23d754f11"} Nov 25 23:17:04 crc kubenswrapper[5045]: I1125 23:17:04.758443 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" podUID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.110:5353: connect: connection refused" Nov 25 23:17:05 crc kubenswrapper[5045]: I1125 23:17:05.916511 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0776-account-create-update-v2jfl" event={"ID":"bc65709d-5a60-4967-a2fa-21df985f4f82","Type":"ContainerStarted","Data":"65859d31a91ea1e129e603efda822d5a8aebbe48cd4ca2b5f84ea209a9661f91"} Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.117427 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.247528 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-ovsdbserver-sb\") pod \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.247778 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7pb2\" (UniqueName: \"kubernetes.io/projected/a2aacdc6-eb97-4f00-8f5d-8352ca694351-kube-api-access-q7pb2\") pod \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.247823 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-config\") pod \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.247932 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-dns-svc\") pod \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\" (UID: \"a2aacdc6-eb97-4f00-8f5d-8352ca694351\") " Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.258935 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2aacdc6-eb97-4f00-8f5d-8352ca694351-kube-api-access-q7pb2" (OuterVolumeSpecName: "kube-api-access-q7pb2") pod "a2aacdc6-eb97-4f00-8f5d-8352ca694351" (UID: "a2aacdc6-eb97-4f00-8f5d-8352ca694351"). InnerVolumeSpecName "kube-api-access-q7pb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.296592 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a2aacdc6-eb97-4f00-8f5d-8352ca694351" (UID: "a2aacdc6-eb97-4f00-8f5d-8352ca694351"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.300218 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a2aacdc6-eb97-4f00-8f5d-8352ca694351" (UID: "a2aacdc6-eb97-4f00-8f5d-8352ca694351"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.320393 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-config" (OuterVolumeSpecName: "config") pod "a2aacdc6-eb97-4f00-8f5d-8352ca694351" (UID: "a2aacdc6-eb97-4f00-8f5d-8352ca694351"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.350599 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7pb2\" (UniqueName: \"kubernetes.io/projected/a2aacdc6-eb97-4f00-8f5d-8352ca694351-kube-api-access-q7pb2\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.350665 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.350702 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.350763 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2aacdc6-eb97-4f00-8f5d-8352ca694351-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.932416 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" event={"ID":"a2aacdc6-eb97-4f00-8f5d-8352ca694351","Type":"ContainerDied","Data":"8b73eba8f708d7dde496226eb31063c10adcd55d9c374bca6b0fd7f9e944212b"} Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.932847 5045 scope.go:117] "RemoveContainer" containerID="e02aedf5b2290c84688e281cddcbe39632af745a90e07c96ea2b2cd9d8788502" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.932441 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-w8rr5" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.935048 5045 generic.go:334] "Generic (PLEG): container finished" podID="4df595bc-d45d-4560-9203-c57daba13a4b" containerID="07c9aa194f0874c5b2e76aef4634a9365f72d06293d866714a67e8b1cb52173b" exitCode=0 Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.935136 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-d5x24" event={"ID":"4df595bc-d45d-4560-9203-c57daba13a4b","Type":"ContainerDied","Data":"07c9aa194f0874c5b2e76aef4634a9365f72d06293d866714a67e8b1cb52173b"} Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.939163 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0776-account-create-update-v2jfl" event={"ID":"bc65709d-5a60-4967-a2fa-21df985f4f82","Type":"ContainerDied","Data":"65859d31a91ea1e129e603efda822d5a8aebbe48cd4ca2b5f84ea209a9661f91"} Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.939259 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc65709d-5a60-4967-a2fa-21df985f4f82" containerID="65859d31a91ea1e129e603efda822d5a8aebbe48cd4ca2b5f84ea209a9661f91" exitCode=0 Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.966101 5045 scope.go:117] "RemoveContainer" containerID="f721dc234fc13c223a2c6ef192fcdf4ed2a41f841ff843ada71b3ea3ce22962b" Nov 25 23:17:06 crc kubenswrapper[5045]: I1125 23:17:06.997325 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-w8rr5"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.004064 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-w8rr5"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.137424 5045 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/keystone-db-create-lf45m"] Nov 25 23:17:07 crc kubenswrapper[5045]: E1125 23:17:07.137958 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerName="dnsmasq-dns" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.137987 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerName="dnsmasq-dns" Nov 25 23:17:07 crc kubenswrapper[5045]: E1125 23:17:07.138027 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerName="init" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.138041 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerName="init" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.138325 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" containerName="dnsmasq-dns" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.139238 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.150551 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-lf45m"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.250035 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-59af-account-create-update-v57d9"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.251889 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.254497 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.258729 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-59af-account-create-update-v57d9"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.265836 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czlh7\" (UniqueName: \"kubernetes.io/projected/a37b2f94-c1a1-41d7-8f92-baade66a55fa-kube-api-access-czlh7\") pod \"keystone-db-create-lf45m\" (UID: \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\") " pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.265938 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37b2f94-c1a1-41d7-8f92-baade66a55fa-operator-scripts\") pod \"keystone-db-create-lf45m\" (UID: \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\") " pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.367389 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czlh7\" (UniqueName: \"kubernetes.io/projected/a37b2f94-c1a1-41d7-8f92-baade66a55fa-kube-api-access-czlh7\") pod \"keystone-db-create-lf45m\" (UID: \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\") " pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.367444 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37b2f94-c1a1-41d7-8f92-baade66a55fa-operator-scripts\") pod 
\"keystone-db-create-lf45m\" (UID: \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\") " pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.367483 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24819069-fb36-4a4b-ae07-9f7d67276eb3-operator-scripts\") pod \"keystone-59af-account-create-update-v57d9\" (UID: \"24819069-fb36-4a4b-ae07-9f7d67276eb3\") " pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.367514 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhhn6\" (UniqueName: \"kubernetes.io/projected/24819069-fb36-4a4b-ae07-9f7d67276eb3-kube-api-access-nhhn6\") pod \"keystone-59af-account-create-update-v57d9\" (UID: \"24819069-fb36-4a4b-ae07-9f7d67276eb3\") " pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.368467 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37b2f94-c1a1-41d7-8f92-baade66a55fa-operator-scripts\") pod \"keystone-db-create-lf45m\" (UID: \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\") " pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.387553 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czlh7\" (UniqueName: \"kubernetes.io/projected/a37b2f94-c1a1-41d7-8f92-baade66a55fa-kube-api-access-czlh7\") pod \"keystone-db-create-lf45m\" (UID: \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\") " pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.464750 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.469202 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24819069-fb36-4a4b-ae07-9f7d67276eb3-operator-scripts\") pod \"keystone-59af-account-create-update-v57d9\" (UID: \"24819069-fb36-4a4b-ae07-9f7d67276eb3\") " pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.469285 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhhn6\" (UniqueName: \"kubernetes.io/projected/24819069-fb36-4a4b-ae07-9f7d67276eb3-kube-api-access-nhhn6\") pod \"keystone-59af-account-create-update-v57d9\" (UID: \"24819069-fb36-4a4b-ae07-9f7d67276eb3\") " pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.471304 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24819069-fb36-4a4b-ae07-9f7d67276eb3-operator-scripts\") pod \"keystone-59af-account-create-update-v57d9\" (UID: \"24819069-fb36-4a4b-ae07-9f7d67276eb3\") " pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.481606 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-82cz2"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.483235 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-82cz2" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.493158 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhhn6\" (UniqueName: \"kubernetes.io/projected/24819069-fb36-4a4b-ae07-9f7d67276eb3-kube-api-access-nhhn6\") pod \"keystone-59af-account-create-update-v57d9\" (UID: \"24819069-fb36-4a4b-ae07-9f7d67276eb3\") " pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.494044 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-82cz2"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.572136 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rlzz\" (UniqueName: \"kubernetes.io/projected/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-kube-api-access-2rlzz\") pod \"placement-db-create-82cz2\" (UID: \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\") " pod="openstack/placement-db-create-82cz2" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.572484 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-operator-scripts\") pod \"placement-db-create-82cz2\" (UID: \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\") " pod="openstack/placement-db-create-82cz2" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.584050 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.592701 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6b4a-account-create-update-rm6k5"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.594615 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.596897 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.600509 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6b4a-account-create-update-rm6k5"] Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.673562 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-operator-scripts\") pod \"placement-db-create-82cz2\" (UID: \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\") " pod="openstack/placement-db-create-82cz2" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.673863 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rlzz\" (UniqueName: \"kubernetes.io/projected/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-kube-api-access-2rlzz\") pod \"placement-db-create-82cz2\" (UID: \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\") " pod="openstack/placement-db-create-82cz2" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.675147 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-operator-scripts\") pod \"placement-db-create-82cz2\" (UID: \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\") " pod="openstack/placement-db-create-82cz2" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.693303 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rlzz\" (UniqueName: \"kubernetes.io/projected/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-kube-api-access-2rlzz\") pod \"placement-db-create-82cz2\" (UID: \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\") " pod="openstack/placement-db-create-82cz2" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.775791 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dd8927-af0c-4e6e-99d8-7990e24a959c-operator-scripts\") pod \"placement-6b4a-account-create-update-rm6k5\" (UID: \"65dd8927-af0c-4e6e-99d8-7990e24a959c\") " pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.775875 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-829pn\" (UniqueName: \"kubernetes.io/projected/65dd8927-af0c-4e6e-99d8-7990e24a959c-kube-api-access-829pn\") pod \"placement-6b4a-account-create-update-rm6k5\" (UID: \"65dd8927-af0c-4e6e-99d8-7990e24a959c\") " pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.877527 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dd8927-af0c-4e6e-99d8-7990e24a959c-operator-scripts\") pod \"placement-6b4a-account-create-update-rm6k5\" (UID: \"65dd8927-af0c-4e6e-99d8-7990e24a959c\") " pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.877603 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-829pn\" (UniqueName: 
\"kubernetes.io/projected/65dd8927-af0c-4e6e-99d8-7990e24a959c-kube-api-access-829pn\") pod \"placement-6b4a-account-create-update-rm6k5\" (UID: \"65dd8927-af0c-4e6e-99d8-7990e24a959c\") " pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.878271 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dd8927-af0c-4e6e-99d8-7990e24a959c-operator-scripts\") pod \"placement-6b4a-account-create-update-rm6k5\" (UID: \"65dd8927-af0c-4e6e-99d8-7990e24a959c\") " pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.891860 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-829pn\" (UniqueName: \"kubernetes.io/projected/65dd8927-af0c-4e6e-99d8-7990e24a959c-kube-api-access-829pn\") pod \"placement-6b4a-account-create-update-rm6k5\" (UID: \"65dd8927-af0c-4e6e-99d8-7990e24a959c\") " pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.899284 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-82cz2" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.930485 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-lf45m"] Nov 25 23:17:07 crc kubenswrapper[5045]: W1125 23:17:07.938881 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda37b2f94_c1a1_41d7_8f92_baade66a55fa.slice/crio-158b64c59f422ff2b7f4414a867979ec6e56edfef611c4ceea8dc3db3aced078 WatchSource:0}: Error finding container 158b64c59f422ff2b7f4414a867979ec6e56edfef611c4ceea8dc3db3aced078: Status 404 returned error can't find the container with id 158b64c59f422ff2b7f4414a867979ec6e56edfef611c4ceea8dc3db3aced078 Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.946571 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:07 crc kubenswrapper[5045]: I1125 23:17:07.948765 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-lf45m" event={"ID":"a37b2f94-c1a1-41d7-8f92-baade66a55fa","Type":"ContainerStarted","Data":"158b64c59f422ff2b7f4414a867979ec6e56edfef611c4ceea8dc3db3aced078"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.042435 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-59af-account-create-update-v57d9"] Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.412868 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2aacdc6-eb97-4f00-8f5d-8352ca694351" path="/var/lib/kubelet/pods/a2aacdc6-eb97-4f00-8f5d-8352ca694351/volumes" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.424583 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-82cz2"] Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.427198 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-d5x24" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.469626 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.522047 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6b4a-account-create-update-rm6k5"] Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.592886 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc65709d-5a60-4967-a2fa-21df985f4f82-operator-scripts\") pod \"bc65709d-5a60-4967-a2fa-21df985f4f82\" (UID: \"bc65709d-5a60-4967-a2fa-21df985f4f82\") " Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.593005 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvsts\" (UniqueName: \"kubernetes.io/projected/4df595bc-d45d-4560-9203-c57daba13a4b-kube-api-access-kvsts\") pod \"4df595bc-d45d-4560-9203-c57daba13a4b\" (UID: \"4df595bc-d45d-4560-9203-c57daba13a4b\") " Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.593098 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4df595bc-d45d-4560-9203-c57daba13a4b-operator-scripts\") pod \"4df595bc-d45d-4560-9203-c57daba13a4b\" (UID: \"4df595bc-d45d-4560-9203-c57daba13a4b\") " Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.593125 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4r6j\" (UniqueName: \"kubernetes.io/projected/bc65709d-5a60-4967-a2fa-21df985f4f82-kube-api-access-j4r6j\") pod \"bc65709d-5a60-4967-a2fa-21df985f4f82\" (UID: \"bc65709d-5a60-4967-a2fa-21df985f4f82\") " Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.594093 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4df595bc-d45d-4560-9203-c57daba13a4b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4df595bc-d45d-4560-9203-c57daba13a4b" (UID: "4df595bc-d45d-4560-9203-c57daba13a4b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.594283 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc65709d-5a60-4967-a2fa-21df985f4f82-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bc65709d-5a60-4967-a2fa-21df985f4f82" (UID: "bc65709d-5a60-4967-a2fa-21df985f4f82"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.597939 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc65709d-5a60-4967-a2fa-21df985f4f82-kube-api-access-j4r6j" (OuterVolumeSpecName: "kube-api-access-j4r6j") pod "bc65709d-5a60-4967-a2fa-21df985f4f82" (UID: "bc65709d-5a60-4967-a2fa-21df985f4f82"). InnerVolumeSpecName "kube-api-access-j4r6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.598101 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4df595bc-d45d-4560-9203-c57daba13a4b-kube-api-access-kvsts" (OuterVolumeSpecName: "kube-api-access-kvsts") pod "4df595bc-d45d-4560-9203-c57daba13a4b" (UID: "4df595bc-d45d-4560-9203-c57daba13a4b"). InnerVolumeSpecName "kube-api-access-kvsts". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:08 crc kubenswrapper[5045]: W1125 23:17:08.608678 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65dd8927_af0c_4e6e_99d8_7990e24a959c.slice/crio-39568f21065c36dcf0d7fad48158f995f67b0c3e7dd529fb9f6baf3f6e1fdf3f WatchSource:0}: Error finding container 39568f21065c36dcf0d7fad48158f995f67b0c3e7dd529fb9f6baf3f6e1fdf3f: Status 404 returned error can't find the container with id 39568f21065c36dcf0d7fad48158f995f67b0c3e7dd529fb9f6baf3f6e1fdf3f Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.694943 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvsts\" (UniqueName: \"kubernetes.io/projected/4df595bc-d45d-4560-9203-c57daba13a4b-kube-api-access-kvsts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.694986 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4df595bc-d45d-4560-9203-c57daba13a4b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.695005 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4r6j\" (UniqueName: \"kubernetes.io/projected/bc65709d-5a60-4967-a2fa-21df985f4f82-kube-api-access-j4r6j\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.695022 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc65709d-5a60-4967-a2fa-21df985f4f82-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.962425 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6b4a-account-create-update-rm6k5" event={"ID":"65dd8927-af0c-4e6e-99d8-7990e24a959c","Type":"ContainerStarted","Data":"936f7fea413558d85250c41e02ed1f30acfb2d9a6242694bdbfc29de296c93df"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.962491 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6b4a-account-create-update-rm6k5" event={"ID":"65dd8927-af0c-4e6e-99d8-7990e24a959c","Type":"ContainerStarted","Data":"39568f21065c36dcf0d7fad48158f995f67b0c3e7dd529fb9f6baf3f6e1fdf3f"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.965777 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-d5x24" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.965793 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-d5x24" event={"ID":"4df595bc-d45d-4560-9203-c57daba13a4b","Type":"ContainerDied","Data":"0bb4b0f2fc4c7a4198808dc92451e523ada964fd31b21743aa3c2ff23d754f11"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.965831 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0bb4b0f2fc4c7a4198808dc92451e523ada964fd31b21743aa3c2ff23d754f11" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.969460 5045 generic.go:334] "Generic (PLEG): container finished" podID="24819069-fb36-4a4b-ae07-9f7d67276eb3" containerID="5e6d6260f96dc437b08a62ae214aef03f28fa362c61c2019c45d6581badbae76" exitCode=0 Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.969532 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-59af-account-create-update-v57d9" event={"ID":"24819069-fb36-4a4b-ae07-9f7d67276eb3","Type":"ContainerDied","Data":"5e6d6260f96dc437b08a62ae214aef03f28fa362c61c2019c45d6581badbae76"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.969560 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-59af-account-create-update-v57d9" event={"ID":"24819069-fb36-4a4b-ae07-9f7d67276eb3","Type":"ContainerStarted","Data":"ee3b151808ab36d8f6aae420c0c1e54697a89eeedd6ab8224fab942e19499e6e"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.971631 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0776-account-create-update-v2jfl" event={"ID":"bc65709d-5a60-4967-a2fa-21df985f4f82","Type":"ContainerDied","Data":"7992605fcbf95bc6638e16bd56a999f87fa4319c44a33f27e7a041d09ce11d47"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.971661 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7992605fcbf95bc6638e16bd56a999f87fa4319c44a33f27e7a041d09ce11d47" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.971739 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-0776-account-create-update-v2jfl" Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.974306 5045 generic.go:334] "Generic (PLEG): container finished" podID="8ac58ff5-0d42-4ac9-b966-d628a21a5c91" containerID="baaee5ce924f8633f6b8d0b906fe203d5b4c7c31199dd5fc3dee4bc4364004e3" exitCode=0 Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.974358 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-82cz2" event={"ID":"8ac58ff5-0d42-4ac9-b966-d628a21a5c91","Type":"ContainerDied","Data":"baaee5ce924f8633f6b8d0b906fe203d5b4c7c31199dd5fc3dee4bc4364004e3"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.974409 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-82cz2" event={"ID":"8ac58ff5-0d42-4ac9-b966-d628a21a5c91","Type":"ContainerStarted","Data":"da35eece10c57f990d0584b989efa27463977a7601248ff7b7432f6d662043fc"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.977336 5045 generic.go:334] "Generic (PLEG): container finished" podID="a37b2f94-c1a1-41d7-8f92-baade66a55fa" containerID="4ce7f8d7e27b2dd0cbae3334aaa921da50a2b3dcfeaa9afd11cc032dc1207385" exitCode=0 Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.977423 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-lf45m" event={"ID":"a37b2f94-c1a1-41d7-8f92-baade66a55fa","Type":"ContainerDied","Data":"4ce7f8d7e27b2dd0cbae3334aaa921da50a2b3dcfeaa9afd11cc032dc1207385"} Nov 25 23:17:08 crc kubenswrapper[5045]: I1125 23:17:08.983663 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6b4a-account-create-update-rm6k5" podStartSLOduration=1.983645279 podStartE2EDuration="1.983645279s" podCreationTimestamp="2025-11-25 23:17:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:17:08.982175118 +0000 UTC m=+1085.339834260" watchObservedRunningTime="2025-11-25 23:17:08.983645279 +0000 UTC m=+1085.341304391" Nov 25 23:17:09 crc kubenswrapper[5045]: I1125 23:17:09.424798 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 23:17:09 crc kubenswrapper[5045]: I1125 23:17:09.991954 5045 generic.go:334] "Generic (PLEG): container finished" podID="65dd8927-af0c-4e6e-99d8-7990e24a959c" containerID="936f7fea413558d85250c41e02ed1f30acfb2d9a6242694bdbfc29de296c93df" exitCode=0 Nov 25 23:17:09 crc kubenswrapper[5045]: I1125 23:17:09.992922 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6b4a-account-create-update-rm6k5" event={"ID":"65dd8927-af0c-4e6e-99d8-7990e24a959c","Type":"ContainerDied","Data":"936f7fea413558d85250c41e02ed1f30acfb2d9a6242694bdbfc29de296c93df"} Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.384286 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.463659 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.474587 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-82cz2" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.543614 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37b2f94-c1a1-41d7-8f92-baade66a55fa-operator-scripts\") pod \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\" (UID: \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\") " Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.543678 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czlh7\" (UniqueName: \"kubernetes.io/projected/a37b2f94-c1a1-41d7-8f92-baade66a55fa-kube-api-access-czlh7\") pod \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\" (UID: \"a37b2f94-c1a1-41d7-8f92-baade66a55fa\") " Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.544363 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a37b2f94-c1a1-41d7-8f92-baade66a55fa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a37b2f94-c1a1-41d7-8f92-baade66a55fa" (UID: "a37b2f94-c1a1-41d7-8f92-baade66a55fa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.549813 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a37b2f94-c1a1-41d7-8f92-baade66a55fa-kube-api-access-czlh7" (OuterVolumeSpecName: "kube-api-access-czlh7") pod "a37b2f94-c1a1-41d7-8f92-baade66a55fa" (UID: "a37b2f94-c1a1-41d7-8f92-baade66a55fa"). InnerVolumeSpecName "kube-api-access-czlh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.645741 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24819069-fb36-4a4b-ae07-9f7d67276eb3-operator-scripts\") pod \"24819069-fb36-4a4b-ae07-9f7d67276eb3\" (UID: \"24819069-fb36-4a4b-ae07-9f7d67276eb3\") " Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.645822 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-operator-scripts\") pod \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\" (UID: \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\") " Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.645860 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhhn6\" (UniqueName: \"kubernetes.io/projected/24819069-fb36-4a4b-ae07-9f7d67276eb3-kube-api-access-nhhn6\") pod \"24819069-fb36-4a4b-ae07-9f7d67276eb3\" (UID: \"24819069-fb36-4a4b-ae07-9f7d67276eb3\") " Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.646011 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rlzz\" (UniqueName: \"kubernetes.io/projected/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-kube-api-access-2rlzz\") pod \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\" (UID: \"8ac58ff5-0d42-4ac9-b966-d628a21a5c91\") " Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.646351 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8ac58ff5-0d42-4ac9-b966-d628a21a5c91" (UID: "8ac58ff5-0d42-4ac9-b966-d628a21a5c91"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.646360 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24819069-fb36-4a4b-ae07-9f7d67276eb3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "24819069-fb36-4a4b-ae07-9f7d67276eb3" (UID: "24819069-fb36-4a4b-ae07-9f7d67276eb3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.646434 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37b2f94-c1a1-41d7-8f92-baade66a55fa-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.646449 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czlh7\" (UniqueName: \"kubernetes.io/projected/a37b2f94-c1a1-41d7-8f92-baade66a55fa-kube-api-access-czlh7\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.649946 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24819069-fb36-4a4b-ae07-9f7d67276eb3-kube-api-access-nhhn6" (OuterVolumeSpecName: "kube-api-access-nhhn6") pod "24819069-fb36-4a4b-ae07-9f7d67276eb3" (UID: "24819069-fb36-4a4b-ae07-9f7d67276eb3"). InnerVolumeSpecName "kube-api-access-nhhn6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.651980 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-kube-api-access-2rlzz" (OuterVolumeSpecName: "kube-api-access-2rlzz") pod "8ac58ff5-0d42-4ac9-b966-d628a21a5c91" (UID: "8ac58ff5-0d42-4ac9-b966-d628a21a5c91"). InnerVolumeSpecName "kube-api-access-2rlzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.748746 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24819069-fb36-4a4b-ae07-9f7d67276eb3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.748803 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.748823 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhhn6\" (UniqueName: \"kubernetes.io/projected/24819069-fb36-4a4b-ae07-9f7d67276eb3-kube-api-access-nhhn6\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:10 crc kubenswrapper[5045]: I1125 23:17:10.748885 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rlzz\" (UniqueName: \"kubernetes.io/projected/8ac58ff5-0d42-4ac9-b966-d628a21a5c91-kube-api-access-2rlzz\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.007184 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-59af-account-create-update-v57d9" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.007217 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-59af-account-create-update-v57d9" event={"ID":"24819069-fb36-4a4b-ae07-9f7d67276eb3","Type":"ContainerDied","Data":"ee3b151808ab36d8f6aae420c0c1e54697a89eeedd6ab8224fab942e19499e6e"} Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.007262 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee3b151808ab36d8f6aae420c0c1e54697a89eeedd6ab8224fab942e19499e6e" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.010441 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-82cz2" event={"ID":"8ac58ff5-0d42-4ac9-b966-d628a21a5c91","Type":"ContainerDied","Data":"da35eece10c57f990d0584b989efa27463977a7601248ff7b7432f6d662043fc"} Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.010486 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-82cz2" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.010504 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da35eece10c57f990d0584b989efa27463977a7601248ff7b7432f6d662043fc" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.013401 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-lf45m" event={"ID":"a37b2f94-c1a1-41d7-8f92-baade66a55fa","Type":"ContainerDied","Data":"158b64c59f422ff2b7f4414a867979ec6e56edfef611c4ceea8dc3db3aced078"} Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.013461 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="158b64c59f422ff2b7f4414a867979ec6e56edfef611c4ceea8dc3db3aced078" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.013617 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-lf45m" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.458290 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.562272 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dd8927-af0c-4e6e-99d8-7990e24a959c-operator-scripts\") pod \"65dd8927-af0c-4e6e-99d8-7990e24a959c\" (UID: \"65dd8927-af0c-4e6e-99d8-7990e24a959c\") " Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.562580 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-829pn\" (UniqueName: \"kubernetes.io/projected/65dd8927-af0c-4e6e-99d8-7990e24a959c-kube-api-access-829pn\") pod \"65dd8927-af0c-4e6e-99d8-7990e24a959c\" (UID: \"65dd8927-af0c-4e6e-99d8-7990e24a959c\") " Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.562640 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65dd8927-af0c-4e6e-99d8-7990e24a959c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "65dd8927-af0c-4e6e-99d8-7990e24a959c" (UID: "65dd8927-af0c-4e6e-99d8-7990e24a959c"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.563007 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dd8927-af0c-4e6e-99d8-7990e24a959c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.566836 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65dd8927-af0c-4e6e-99d8-7990e24a959c-kube-api-access-829pn" (OuterVolumeSpecName: "kube-api-access-829pn") pod "65dd8927-af0c-4e6e-99d8-7990e24a959c" (UID: "65dd8927-af0c-4e6e-99d8-7990e24a959c"). InnerVolumeSpecName "kube-api-access-829pn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:11 crc kubenswrapper[5045]: I1125 23:17:11.664705 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-829pn\" (UniqueName: \"kubernetes.io/projected/65dd8927-af0c-4e6e-99d8-7990e24a959c-kube-api-access-829pn\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.033511 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6b4a-account-create-update-rm6k5" event={"ID":"65dd8927-af0c-4e6e-99d8-7990e24a959c","Type":"ContainerDied","Data":"39568f21065c36dcf0d7fad48158f995f67b0c3e7dd529fb9f6baf3f6e1fdf3f"} Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.033562 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39568f21065c36dcf0d7fad48158f995f67b0c3e7dd529fb9f6baf3f6e1fdf3f" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.033604 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6b4a-account-create-update-rm6k5" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.979629 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-t4zxw"] Nov 25 23:17:12 crc kubenswrapper[5045]: E1125 23:17:12.980019 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65dd8927-af0c-4e6e-99d8-7990e24a959c" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980036 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="65dd8927-af0c-4e6e-99d8-7990e24a959c" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: E1125 23:17:12.980066 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24819069-fb36-4a4b-ae07-9f7d67276eb3" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980076 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="24819069-fb36-4a4b-ae07-9f7d67276eb3" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: E1125 23:17:12.980101 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc65709d-5a60-4967-a2fa-21df985f4f82" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980110 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc65709d-5a60-4967-a2fa-21df985f4f82" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: E1125 23:17:12.980120 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a37b2f94-c1a1-41d7-8f92-baade66a55fa" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980127 5045 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="a37b2f94-c1a1-41d7-8f92-baade66a55fa" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: E1125 23:17:12.980138 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ac58ff5-0d42-4ac9-b966-d628a21a5c91" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980145 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ac58ff5-0d42-4ac9-b966-d628a21a5c91" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: E1125 23:17:12.980157 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4df595bc-d45d-4560-9203-c57daba13a4b" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980165 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="4df595bc-d45d-4560-9203-c57daba13a4b" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980333 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a37b2f94-c1a1-41d7-8f92-baade66a55fa" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980351 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="24819069-fb36-4a4b-ae07-9f7d67276eb3" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980363 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc65709d-5a60-4967-a2fa-21df985f4f82" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980376 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ac58ff5-0d42-4ac9-b966-d628a21a5c91" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980393 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="4df595bc-d45d-4560-9203-c57daba13a4b" containerName="mariadb-database-create" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.980403 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="65dd8927-af0c-4e6e-99d8-7990e24a959c" containerName="mariadb-account-create-update" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.981041 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.982815 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-pjcxf" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.983208 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 23:17:12 crc kubenswrapper[5045]: I1125 23:17:12.991569 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-t4zxw"] Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.090557 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-config-data\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.090929 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xhb2\" (UniqueName: \"kubernetes.io/projected/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-kube-api-access-7xhb2\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.090997 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-db-sync-config-data\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.091162 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-combined-ca-bundle\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.193060 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xhb2\" (UniqueName: \"kubernetes.io/projected/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-kube-api-access-7xhb2\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.193438 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-db-sync-config-data\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.193524 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-combined-ca-bundle\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.193662 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-config-data\") pod 
\"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.198942 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-config-data\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.200949 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-db-sync-config-data\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.201518 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-combined-ca-bundle\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.220922 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xhb2\" (UniqueName: \"kubernetes.io/projected/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-kube-api-access-7xhb2\") pod \"glance-db-sync-t4zxw\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.307903 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:13 crc kubenswrapper[5045]: I1125 23:17:13.654101 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-t4zxw"] Nov 25 23:17:14 crc kubenswrapper[5045]: I1125 23:17:14.050558 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-t4zxw" event={"ID":"5db210ab-fa77-45b7-a0f8-d115ad2a5f73","Type":"ContainerStarted","Data":"6d44f8930fde198b97c9f5efe1a68fa1f672faa81e4d97636a13084ea8cee387"} Nov 25 23:17:18 crc kubenswrapper[5045]: I1125 23:17:18.054845 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-kfwsl" podUID="efb42386-0a5d-423f-b31e-13e9433271ba" containerName="ovn-controller" probeResult="failure" output=< Nov 25 23:17:18 crc kubenswrapper[5045]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 23:17:18 crc kubenswrapper[5045]: > Nov 25 23:17:20 crc kubenswrapper[5045]: I1125 23:17:20.109409 5045 generic.go:334] "Generic (PLEG): container finished" podID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" containerID="c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48" exitCode=0 Nov 25 23:17:20 crc kubenswrapper[5045]: I1125 23:17:20.109497 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95","Type":"ContainerDied","Data":"c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48"} Nov 25 23:17:21 crc kubenswrapper[5045]: I1125 23:17:21.127230 5045 generic.go:334] "Generic (PLEG): container finished" podID="7fac1a35-2303-42d7-b27b-410ecff1b89a" containerID="08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d" exitCode=0 Nov 25 23:17:21 crc kubenswrapper[5045]: I1125 23:17:21.127320 5045 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7fac1a35-2303-42d7-b27b-410ecff1b89a","Type":"ContainerDied","Data":"08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d"} Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.055966 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-kfwsl" podUID="efb42386-0a5d-423f-b31e-13e9433271ba" containerName="ovn-controller" probeResult="failure" output=< Nov 25 23:17:23 crc kubenswrapper[5045]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 23:17:23 crc kubenswrapper[5045]: > Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.079758 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.093275 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-z2vw6" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.305765 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-kfwsl-config-vjvh2"] Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.307044 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.309617 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.318331 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kfwsl-config-vjvh2"] Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.472799 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-log-ovn\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.472877 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-scripts\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.472962 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hqqm\" (UniqueName: \"kubernetes.io/projected/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-kube-api-access-6hqqm\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.472992 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-additional-scripts\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.473014 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run-ovn\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.473174 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.574694 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.574767 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-log-ovn\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.574806 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-scripts\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.575103 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-log-ovn\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.575137 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.576355 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hqqm\" (UniqueName: \"kubernetes.io/projected/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-kube-api-access-6hqqm\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.576432 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-additional-scripts\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.576475 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run-ovn\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.576636 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run-ovn\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.577207 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-additional-scripts\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.579159 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-scripts\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.602759 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hqqm\" (UniqueName: \"kubernetes.io/projected/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-kube-api-access-6hqqm\") pod \"ovn-controller-kfwsl-config-vjvh2\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:23 crc kubenswrapper[5045]: I1125 23:17:23.637644 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:24 crc kubenswrapper[5045]: I1125 23:17:24.452911 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kfwsl-config-vjvh2"] Nov 25 23:17:24 crc kubenswrapper[5045]: W1125 23:17:24.462129 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ee3c10f_3296_4554_8bab_80bfcbaa8de9.slice/crio-2c5940d1a77cb1a82589a7ab519d715511946ba31e7dc414eca2ce1141b5dce0 WatchSource:0}: Error finding container 2c5940d1a77cb1a82589a7ab519d715511946ba31e7dc414eca2ce1141b5dce0: Status 404 returned error can't find the container with id 2c5940d1a77cb1a82589a7ab519d715511946ba31e7dc414eca2ce1141b5dce0 Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.162649 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95","Type":"ContainerStarted","Data":"ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e"} Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.163224 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.168147 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7fac1a35-2303-42d7-b27b-410ecff1b89a","Type":"ContainerStarted","Data":"108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586"} Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.168475 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.176608 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-t4zxw" event={"ID":"5db210ab-fa77-45b7-a0f8-d115ad2a5f73","Type":"ContainerStarted","Data":"5be049f291347f48c5690dcb0687d99696e28e794691ed94708e39dfd227cbf8"} Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.178055 5045 generic.go:334] "Generic (PLEG): container finished" podID="2ee3c10f-3296-4554-8bab-80bfcbaa8de9" containerID="8a11996119b6d30f900e89c829331367f50c87e5755083d713c6178e2570c206" exitCode=0 Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.178118 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl-config-vjvh2" event={"ID":"2ee3c10f-3296-4554-8bab-80bfcbaa8de9","Type":"ContainerDied","Data":"8a11996119b6d30f900e89c829331367f50c87e5755083d713c6178e2570c206"} Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.178145 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl-config-vjvh2" event={"ID":"2ee3c10f-3296-4554-8bab-80bfcbaa8de9","Type":"ContainerStarted","Data":"2c5940d1a77cb1a82589a7ab519d715511946ba31e7dc414eca2ce1141b5dce0"} Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.198433 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=54.622843395 podStartE2EDuration="1m2.198406596s" podCreationTimestamp="2025-11-25 23:16:23 +0000 UTC" firstStartedPulling="2025-11-25 23:16:37.73637174 +0000 UTC m=+1054.094030852" lastFinishedPulling="2025-11-25 23:16:45.311934901 +0000 UTC m=+1061.669594053" observedRunningTime="2025-11-25 23:17:25.186927946 +0000 UTC m=+1101.544587148" watchObservedRunningTime="2025-11-25 23:17:25.198406596 +0000 UTC 
m=+1101.556065748" Nov 25 23:17:25 crc kubenswrapper[5045]: I1125 23:17:25.227046 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=55.479086505 podStartE2EDuration="1m3.227023489s" podCreationTimestamp="2025-11-25 23:16:22 +0000 UTC" firstStartedPulling="2025-11-25 23:16:38.366551156 +0000 UTC m=+1054.724210288" lastFinishedPulling="2025-11-25 23:16:46.11448813 +0000 UTC m=+1062.472147272" observedRunningTime="2025-11-25 23:17:25.219867633 +0000 UTC m=+1101.577526745" watchObservedRunningTime="2025-11-25 23:17:25.227023489 +0000 UTC m=+1101.584682611" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.851167 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.877859 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-t4zxw" podStartSLOduration=4.414083194 podStartE2EDuration="14.877833984s" podCreationTimestamp="2025-11-25 23:17:12 +0000 UTC" firstStartedPulling="2025-11-25 23:17:13.663028382 +0000 UTC m=+1090.020687494" lastFinishedPulling="2025-11-25 23:17:24.126779172 +0000 UTC m=+1100.484438284" observedRunningTime="2025-11-25 23:17:25.264703901 +0000 UTC m=+1101.622363013" watchObservedRunningTime="2025-11-25 23:17:26.877833984 +0000 UTC m=+1103.235493136" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.929961 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run\") pod \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930012 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-log-ovn\") pod \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930060 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run-ovn\") pod \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930106 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-additional-scripts\") pod \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930149 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "2ee3c10f-3296-4554-8bab-80bfcbaa8de9" (UID: "2ee3c10f-3296-4554-8bab-80bfcbaa8de9"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930173 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hqqm\" (UniqueName: \"kubernetes.io/projected/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-kube-api-access-6hqqm\") pod \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930148 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "2ee3c10f-3296-4554-8bab-80bfcbaa8de9" (UID: "2ee3c10f-3296-4554-8bab-80bfcbaa8de9"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930280 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-scripts\") pod \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\" (UID: \"2ee3c10f-3296-4554-8bab-80bfcbaa8de9\") " Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930663 5045 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930683 5045 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.931377 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "2ee3c10f-3296-4554-8bab-80bfcbaa8de9" (UID: "2ee3c10f-3296-4554-8bab-80bfcbaa8de9"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.931544 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-scripts" (OuterVolumeSpecName: "scripts") pod "2ee3c10f-3296-4554-8bab-80bfcbaa8de9" (UID: "2ee3c10f-3296-4554-8bab-80bfcbaa8de9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.930105 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run" (OuterVolumeSpecName: "var-run") pod "2ee3c10f-3296-4554-8bab-80bfcbaa8de9" (UID: "2ee3c10f-3296-4554-8bab-80bfcbaa8de9"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:17:26 crc kubenswrapper[5045]: I1125 23:17:26.940994 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-kube-api-access-6hqqm" (OuterVolumeSpecName: "kube-api-access-6hqqm") pod "2ee3c10f-3296-4554-8bab-80bfcbaa8de9" (UID: "2ee3c10f-3296-4554-8bab-80bfcbaa8de9"). InnerVolumeSpecName "kube-api-access-6hqqm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.032693 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hqqm\" (UniqueName: \"kubernetes.io/projected/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-kube-api-access-6hqqm\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.032752 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.032762 5045 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.032773 5045 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2ee3c10f-3296-4554-8bab-80bfcbaa8de9-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.199673 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl-config-vjvh2" event={"ID":"2ee3c10f-3296-4554-8bab-80bfcbaa8de9","Type":"ContainerDied","Data":"2c5940d1a77cb1a82589a7ab519d715511946ba31e7dc414eca2ce1141b5dce0"} Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.199741 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c5940d1a77cb1a82589a7ab519d715511946ba31e7dc414eca2ce1141b5dce0" Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.199819 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kfwsl-config-vjvh2" Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.978835 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-kfwsl-config-vjvh2"] Nov 25 23:17:27 crc kubenswrapper[5045]: I1125 23:17:27.987939 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-kfwsl-config-vjvh2"] Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.052249 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-kfwsl-config-bhf85"] Nov 25 23:17:28 crc kubenswrapper[5045]: E1125 23:17:28.052820 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ee3c10f-3296-4554-8bab-80bfcbaa8de9" containerName="ovn-config" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.052891 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ee3c10f-3296-4554-8bab-80bfcbaa8de9" containerName="ovn-config" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.053096 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ee3c10f-3296-4554-8bab-80bfcbaa8de9" containerName="ovn-config" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.053774 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.057493 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.064578 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kfwsl-config-bhf85"] Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.068368 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-kfwsl" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.148466 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-additional-scripts\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.148517 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-scripts\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.148560 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9z587\" (UniqueName: \"kubernetes.io/projected/22044da8-8e0f-4891-b270-3ae8e907f2db-kube-api-access-9z587\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.148581 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.148605 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run-ovn\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.148643 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-log-ovn\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250117 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-additional-scripts\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250179 
5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-scripts\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250218 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9z587\" (UniqueName: \"kubernetes.io/projected/22044da8-8e0f-4891-b270-3ae8e907f2db-kube-api-access-9z587\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250238 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250260 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run-ovn\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250292 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-log-ovn\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250610 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-log-ovn\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250661 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.250694 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run-ovn\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.251474 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-additional-scripts\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.252480 5045 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-scripts\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.279905 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9z587\" (UniqueName: \"kubernetes.io/projected/22044da8-8e0f-4891-b270-3ae8e907f2db-kube-api-access-9z587\") pod \"ovn-controller-kfwsl-config-bhf85\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.375184 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.406121 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ee3c10f-3296-4554-8bab-80bfcbaa8de9" path="/var/lib/kubelet/pods/2ee3c10f-3296-4554-8bab-80bfcbaa8de9/volumes" Nov 25 23:17:28 crc kubenswrapper[5045]: I1125 23:17:28.816528 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kfwsl-config-bhf85"] Nov 25 23:17:28 crc kubenswrapper[5045]: W1125 23:17:28.819566 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22044da8_8e0f_4891_b270_3ae8e907f2db.slice/crio-16c1d7937074d80f219d9d000cb15d37966616db27751dc8e49bccfce54537d3 WatchSource:0}: Error finding container 16c1d7937074d80f219d9d000cb15d37966616db27751dc8e49bccfce54537d3: Status 404 returned error can't find the container with id 16c1d7937074d80f219d9d000cb15d37966616db27751dc8e49bccfce54537d3 Nov 25 23:17:29 crc kubenswrapper[5045]: I1125 23:17:29.217133 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl-config-bhf85" event={"ID":"22044da8-8e0f-4891-b270-3ae8e907f2db","Type":"ContainerStarted","Data":"aebede3e1e21acb7317b76aa4914dcd7b9f7422f4ae8d79f3b9ef122da51bee6"} Nov 25 23:17:29 crc kubenswrapper[5045]: I1125 23:17:29.217490 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl-config-bhf85" event={"ID":"22044da8-8e0f-4891-b270-3ae8e907f2db","Type":"ContainerStarted","Data":"16c1d7937074d80f219d9d000cb15d37966616db27751dc8e49bccfce54537d3"} Nov 25 23:17:29 crc kubenswrapper[5045]: I1125 23:17:29.237625 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-kfwsl-config-bhf85" podStartSLOduration=1.237603724 podStartE2EDuration="1.237603724s" podCreationTimestamp="2025-11-25 23:17:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:17:29.235522344 +0000 UTC m=+1105.593181466" watchObservedRunningTime="2025-11-25 23:17:29.237603724 +0000 UTC m=+1105.595262846" Nov 25 23:17:30 crc kubenswrapper[5045]: I1125 23:17:30.224294 5045 generic.go:334] "Generic (PLEG): container finished" podID="22044da8-8e0f-4891-b270-3ae8e907f2db" containerID="aebede3e1e21acb7317b76aa4914dcd7b9f7422f4ae8d79f3b9ef122da51bee6" exitCode=0 Nov 25 23:17:30 crc kubenswrapper[5045]: I1125 23:17:30.224381 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl-config-bhf85" 
event={"ID":"22044da8-8e0f-4891-b270-3ae8e907f2db","Type":"ContainerDied","Data":"aebede3e1e21acb7317b76aa4914dcd7b9f7422f4ae8d79f3b9ef122da51bee6"} Nov 25 23:17:30 crc kubenswrapper[5045]: I1125 23:17:30.540738 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:17:30 crc kubenswrapper[5045]: I1125 23:17:30.541048 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.237174 5045 generic.go:334] "Generic (PLEG): container finished" podID="5db210ab-fa77-45b7-a0f8-d115ad2a5f73" containerID="5be049f291347f48c5690dcb0687d99696e28e794691ed94708e39dfd227cbf8" exitCode=0 Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.237308 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-t4zxw" event={"ID":"5db210ab-fa77-45b7-a0f8-d115ad2a5f73","Type":"ContainerDied","Data":"5be049f291347f48c5690dcb0687d99696e28e794691ed94708e39dfd227cbf8"} Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.652476 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806175 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-log-ovn\") pod \"22044da8-8e0f-4891-b270-3ae8e907f2db\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806277 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "22044da8-8e0f-4891-b270-3ae8e907f2db" (UID: "22044da8-8e0f-4891-b270-3ae8e907f2db"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806338 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-additional-scripts\") pod \"22044da8-8e0f-4891-b270-3ae8e907f2db\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806373 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run\") pod \"22044da8-8e0f-4891-b270-3ae8e907f2db\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806442 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9z587\" (UniqueName: \"kubernetes.io/projected/22044da8-8e0f-4891-b270-3ae8e907f2db-kube-api-access-9z587\") pod \"22044da8-8e0f-4891-b270-3ae8e907f2db\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806528 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run-ovn\") pod \"22044da8-8e0f-4891-b270-3ae8e907f2db\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806554 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-scripts\") pod \"22044da8-8e0f-4891-b270-3ae8e907f2db\" (UID: \"22044da8-8e0f-4891-b270-3ae8e907f2db\") " Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806738 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run" (OuterVolumeSpecName: "var-run") pod "22044da8-8e0f-4891-b270-3ae8e907f2db" (UID: "22044da8-8e0f-4891-b270-3ae8e907f2db"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.806776 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "22044da8-8e0f-4891-b270-3ae8e907f2db" (UID: "22044da8-8e0f-4891-b270-3ae8e907f2db"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.807143 5045 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.807168 5045 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.807188 5045 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/22044da8-8e0f-4891-b270-3ae8e907f2db-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.807960 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "22044da8-8e0f-4891-b270-3ae8e907f2db" (UID: "22044da8-8e0f-4891-b270-3ae8e907f2db"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.808034 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-scripts" (OuterVolumeSpecName: "scripts") pod "22044da8-8e0f-4891-b270-3ae8e907f2db" (UID: "22044da8-8e0f-4891-b270-3ae8e907f2db"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.814131 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22044da8-8e0f-4891-b270-3ae8e907f2db-kube-api-access-9z587" (OuterVolumeSpecName: "kube-api-access-9z587") pod "22044da8-8e0f-4891-b270-3ae8e907f2db" (UID: "22044da8-8e0f-4891-b270-3ae8e907f2db"). InnerVolumeSpecName "kube-api-access-9z587". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.908792 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9z587\" (UniqueName: \"kubernetes.io/projected/22044da8-8e0f-4891-b270-3ae8e907f2db-kube-api-access-9z587\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.908851 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:31 crc kubenswrapper[5045]: I1125 23:17:31.908871 5045 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/22044da8-8e0f-4891-b270-3ae8e907f2db-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.250602 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kfwsl-config-bhf85" event={"ID":"22044da8-8e0f-4891-b270-3ae8e907f2db","Type":"ContainerDied","Data":"16c1d7937074d80f219d9d000cb15d37966616db27751dc8e49bccfce54537d3"} Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.250654 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16c1d7937074d80f219d9d000cb15d37966616db27751dc8e49bccfce54537d3" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.250655 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kfwsl-config-bhf85" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.322311 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-kfwsl-config-bhf85"] Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.335213 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-kfwsl-config-bhf85"] Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.408358 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22044da8-8e0f-4891-b270-3ae8e907f2db" path="/var/lib/kubelet/pods/22044da8-8e0f-4891-b270-3ae8e907f2db/volumes" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.690121 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.824780 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xhb2\" (UniqueName: \"kubernetes.io/projected/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-kube-api-access-7xhb2\") pod \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.824897 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-combined-ca-bundle\") pod \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.824944 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-db-sync-config-data\") pod \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.825024 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-config-data\") pod \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\" (UID: \"5db210ab-fa77-45b7-a0f8-d115ad2a5f73\") " Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.830741 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "5db210ab-fa77-45b7-a0f8-d115ad2a5f73" (UID: "5db210ab-fa77-45b7-a0f8-d115ad2a5f73"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.839679 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-kube-api-access-7xhb2" (OuterVolumeSpecName: "kube-api-access-7xhb2") pod "5db210ab-fa77-45b7-a0f8-d115ad2a5f73" (UID: "5db210ab-fa77-45b7-a0f8-d115ad2a5f73"). InnerVolumeSpecName "kube-api-access-7xhb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.868861 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-config-data" (OuterVolumeSpecName: "config-data") pod "5db210ab-fa77-45b7-a0f8-d115ad2a5f73" (UID: "5db210ab-fa77-45b7-a0f8-d115ad2a5f73"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.876530 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5db210ab-fa77-45b7-a0f8-d115ad2a5f73" (UID: "5db210ab-fa77-45b7-a0f8-d115ad2a5f73"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.927165 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xhb2\" (UniqueName: \"kubernetes.io/projected/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-kube-api-access-7xhb2\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.927201 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.927211 5045 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:32 crc kubenswrapper[5045]: I1125 23:17:32.927222 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5db210ab-fa77-45b7-a0f8-d115ad2a5f73-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.259269 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-t4zxw" event={"ID":"5db210ab-fa77-45b7-a0f8-d115ad2a5f73","Type":"ContainerDied","Data":"6d44f8930fde198b97c9f5efe1a68fa1f672faa81e4d97636a13084ea8cee387"} Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.260284 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d44f8930fde198b97c9f5efe1a68fa1f672faa81e4d97636a13084ea8cee387" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.259375 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-t4zxw" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.814837 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-6m45j"] Nov 25 23:17:33 crc kubenswrapper[5045]: E1125 23:17:33.815133 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22044da8-8e0f-4891-b270-3ae8e907f2db" containerName="ovn-config" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.815149 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="22044da8-8e0f-4891-b270-3ae8e907f2db" containerName="ovn-config" Nov 25 23:17:33 crc kubenswrapper[5045]: E1125 23:17:33.815179 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5db210ab-fa77-45b7-a0f8-d115ad2a5f73" containerName="glance-db-sync" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.815186 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5db210ab-fa77-45b7-a0f8-d115ad2a5f73" containerName="glance-db-sync" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.815326 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5db210ab-fa77-45b7-a0f8-d115ad2a5f73" containerName="glance-db-sync" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.815339 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="22044da8-8e0f-4891-b270-3ae8e907f2db" containerName="ovn-config" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.816116 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.845901 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-6m45j"] Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.960169 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dpm8\" (UniqueName: \"kubernetes.io/projected/5055ae4f-3f89-4770-830f-0486ee87709f-kube-api-access-9dpm8\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.960229 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-dns-svc\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.960302 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-sb\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.960539 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-nb\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:33 crc kubenswrapper[5045]: I1125 23:17:33.960652 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-config\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.062314 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-sb\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.062418 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-nb\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.062460 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-config\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.062503 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-9dpm8\" (UniqueName: \"kubernetes.io/projected/5055ae4f-3f89-4770-830f-0486ee87709f-kube-api-access-9dpm8\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.062531 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-dns-svc\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.063317 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-sb\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.063556 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-config\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.063564 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-dns-svc\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.064087 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-nb\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.066915 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.089157 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dpm8\" (UniqueName: \"kubernetes.io/projected/5055ae4f-3f89-4770-830f-0486ee87709f-kube-api-access-9dpm8\") pod \"dnsmasq-dns-554567b4f7-6m45j\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.130238 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.441859 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.448555 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-wlv7x"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.449565 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.465298 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-6d43-account-create-update-kmw94"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.466342 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.489263 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.491809 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-wlv7x"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.499006 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6d43-account-create-update-kmw94"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.512694 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-6m45j"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.556372 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-dbmm2"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.557492 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.570853 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce2c763-4353-4914-bc30-b4737ca1ffcf-operator-scripts\") pod \"barbican-6d43-account-create-update-kmw94\" (UID: \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\") " pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.570952 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a79af51-25bb-4e3f-a735-c67a4bc01360-operator-scripts\") pod \"barbican-db-create-wlv7x\" (UID: \"1a79af51-25bb-4e3f-a735-c67a4bc01360\") " pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.571041 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h72r8\" (UniqueName: \"kubernetes.io/projected/1a79af51-25bb-4e3f-a735-c67a4bc01360-kube-api-access-h72r8\") pod \"barbican-db-create-wlv7x\" (UID: \"1a79af51-25bb-4e3f-a735-c67a4bc01360\") " pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.571058 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm7tb\" (UniqueName: \"kubernetes.io/projected/4ce2c763-4353-4914-bc30-b4737ca1ffcf-kube-api-access-tm7tb\") pod \"barbican-6d43-account-create-update-kmw94\" (UID: \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\") " pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.570855 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-dbmm2"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.635412 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-9751-account-create-update-mt6kc"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.636701 5045 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.638207 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.651011 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-9751-account-create-update-mt6kc"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.671955 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h72r8\" (UniqueName: \"kubernetes.io/projected/1a79af51-25bb-4e3f-a735-c67a4bc01360-kube-api-access-h72r8\") pod \"barbican-db-create-wlv7x\" (UID: \"1a79af51-25bb-4e3f-a735-c67a4bc01360\") " pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.672001 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm7tb\" (UniqueName: \"kubernetes.io/projected/4ce2c763-4353-4914-bc30-b4737ca1ffcf-kube-api-access-tm7tb\") pod \"barbican-6d43-account-create-update-kmw94\" (UID: \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\") " pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.672039 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbz7r\" (UniqueName: \"kubernetes.io/projected/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-kube-api-access-hbz7r\") pod \"cinder-db-create-dbmm2\" (UID: \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\") " pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.672113 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-operator-scripts\") pod \"cinder-db-create-dbmm2\" (UID: \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\") " pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.672135 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce2c763-4353-4914-bc30-b4737ca1ffcf-operator-scripts\") pod \"barbican-6d43-account-create-update-kmw94\" (UID: \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\") " pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.672187 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a79af51-25bb-4e3f-a735-c67a4bc01360-operator-scripts\") pod \"barbican-db-create-wlv7x\" (UID: \"1a79af51-25bb-4e3f-a735-c67a4bc01360\") " pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.672877 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce2c763-4353-4914-bc30-b4737ca1ffcf-operator-scripts\") pod \"barbican-6d43-account-create-update-kmw94\" (UID: \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\") " pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.673519 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a79af51-25bb-4e3f-a735-c67a4bc01360-operator-scripts\") pod 
\"barbican-db-create-wlv7x\" (UID: \"1a79af51-25bb-4e3f-a735-c67a4bc01360\") " pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.697642 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm7tb\" (UniqueName: \"kubernetes.io/projected/4ce2c763-4353-4914-bc30-b4737ca1ffcf-kube-api-access-tm7tb\") pod \"barbican-6d43-account-create-update-kmw94\" (UID: \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\") " pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.697806 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h72r8\" (UniqueName: \"kubernetes.io/projected/1a79af51-25bb-4e3f-a735-c67a4bc01360-kube-api-access-h72r8\") pod \"barbican-db-create-wlv7x\" (UID: \"1a79af51-25bb-4e3f-a735-c67a4bc01360\") " pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.750880 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-6szwq"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.751941 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.754164 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.754493 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.754764 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.754824 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fgjn7" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.760986 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-6szwq"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.776085 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a371173-44b3-46c2-ad41-31b7387aca8a-operator-scripts\") pod \"cinder-9751-account-create-update-mt6kc\" (UID: \"2a371173-44b3-46c2-ad41-31b7387aca8a\") " pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.776455 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbz7r\" (UniqueName: \"kubernetes.io/projected/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-kube-api-access-hbz7r\") pod \"cinder-db-create-dbmm2\" (UID: \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\") " pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.776604 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44r8l\" (UniqueName: \"kubernetes.io/projected/2a371173-44b3-46c2-ad41-31b7387aca8a-kube-api-access-44r8l\") pod \"cinder-9751-account-create-update-mt6kc\" (UID: \"2a371173-44b3-46c2-ad41-31b7387aca8a\") " pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.776773 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-operator-scripts\") pod \"cinder-db-create-dbmm2\" (UID: \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\") " pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.777655 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-operator-scripts\") pod \"cinder-db-create-dbmm2\" (UID: \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\") " pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.792836 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbz7r\" (UniqueName: \"kubernetes.io/projected/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-kube-api-access-hbz7r\") pod \"cinder-db-create-dbmm2\" (UID: \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\") " pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.810895 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.845635 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-2hc8p"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.847009 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.850086 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8c0a-account-create-update-65smx"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.851726 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.854866 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.866661 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.874356 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-2hc8p"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.877792 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-combined-ca-bundle\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.877863 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a371173-44b3-46c2-ad41-31b7387aca8a-operator-scripts\") pod \"cinder-9751-account-create-update-mt6kc\" (UID: \"2a371173-44b3-46c2-ad41-31b7387aca8a\") " pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.877938 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44r8l\" (UniqueName: \"kubernetes.io/projected/2a371173-44b3-46c2-ad41-31b7387aca8a-kube-api-access-44r8l\") pod \"cinder-9751-account-create-update-mt6kc\" (UID: \"2a371173-44b3-46c2-ad41-31b7387aca8a\") " pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.877967 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghx76\" (UniqueName: \"kubernetes.io/projected/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-kube-api-access-ghx76\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.877996 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-config-data\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.878640 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a371173-44b3-46c2-ad41-31b7387aca8a-operator-scripts\") pod \"cinder-9751-account-create-update-mt6kc\" (UID: \"2a371173-44b3-46c2-ad41-31b7387aca8a\") " pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.887221 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8c0a-account-create-update-65smx"] Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.891833 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.914057 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44r8l\" (UniqueName: \"kubernetes.io/projected/2a371173-44b3-46c2-ad41-31b7387aca8a-kube-api-access-44r8l\") pod \"cinder-9751-account-create-update-mt6kc\" (UID: \"2a371173-44b3-46c2-ad41-31b7387aca8a\") " pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.955320 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.979452 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kgpf\" (UniqueName: \"kubernetes.io/projected/416caee6-d98c-4f85-a3a7-e23594648a25-kube-api-access-6kgpf\") pod \"neutron-db-create-2hc8p\" (UID: \"416caee6-d98c-4f85-a3a7-e23594648a25\") " pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.979504 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g9vf\" (UniqueName: \"kubernetes.io/projected/56361182-254c-43f6-893c-9f83d9942fe3-kube-api-access-6g9vf\") pod \"neutron-8c0a-account-create-update-65smx\" (UID: \"56361182-254c-43f6-893c-9f83d9942fe3\") " pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.979541 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghx76\" (UniqueName: \"kubernetes.io/projected/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-kube-api-access-ghx76\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.979569 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-config-data\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.979593 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/416caee6-d98c-4f85-a3a7-e23594648a25-operator-scripts\") pod \"neutron-db-create-2hc8p\" (UID: \"416caee6-d98c-4f85-a3a7-e23594648a25\") " pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.979633 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-combined-ca-bundle\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.979660 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56361182-254c-43f6-893c-9f83d9942fe3-operator-scripts\") pod \"neutron-8c0a-account-create-update-65smx\" (UID: \"56361182-254c-43f6-893c-9f83d9942fe3\") " pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:34 crc kubenswrapper[5045]: 
I1125 23:17:34.984626 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-config-data\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:34 crc kubenswrapper[5045]: I1125 23:17:34.987122 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-combined-ca-bundle\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.000688 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghx76\" (UniqueName: \"kubernetes.io/projected/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-kube-api-access-ghx76\") pod \"keystone-db-sync-6szwq\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.078379 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.083034 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgpf\" (UniqueName: \"kubernetes.io/projected/416caee6-d98c-4f85-a3a7-e23594648a25-kube-api-access-6kgpf\") pod \"neutron-db-create-2hc8p\" (UID: \"416caee6-d98c-4f85-a3a7-e23594648a25\") " pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.083184 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6g9vf\" (UniqueName: \"kubernetes.io/projected/56361182-254c-43f6-893c-9f83d9942fe3-kube-api-access-6g9vf\") pod \"neutron-8c0a-account-create-update-65smx\" (UID: \"56361182-254c-43f6-893c-9f83d9942fe3\") " pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.083312 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/416caee6-d98c-4f85-a3a7-e23594648a25-operator-scripts\") pod \"neutron-db-create-2hc8p\" (UID: \"416caee6-d98c-4f85-a3a7-e23594648a25\") " pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.084453 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/416caee6-d98c-4f85-a3a7-e23594648a25-operator-scripts\") pod \"neutron-db-create-2hc8p\" (UID: \"416caee6-d98c-4f85-a3a7-e23594648a25\") " pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.085198 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56361182-254c-43f6-893c-9f83d9942fe3-operator-scripts\") pod \"neutron-8c0a-account-create-update-65smx\" (UID: \"56361182-254c-43f6-893c-9f83d9942fe3\") " pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.086646 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56361182-254c-43f6-893c-9f83d9942fe3-operator-scripts\") pod \"neutron-8c0a-account-create-update-65smx\" 
(UID: \"56361182-254c-43f6-893c-9f83d9942fe3\") " pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.113774 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kgpf\" (UniqueName: \"kubernetes.io/projected/416caee6-d98c-4f85-a3a7-e23594648a25-kube-api-access-6kgpf\") pod \"neutron-db-create-2hc8p\" (UID: \"416caee6-d98c-4f85-a3a7-e23594648a25\") " pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.116362 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6g9vf\" (UniqueName: \"kubernetes.io/projected/56361182-254c-43f6-893c-9f83d9942fe3-kube-api-access-6g9vf\") pod \"neutron-8c0a-account-create-update-65smx\" (UID: \"56361182-254c-43f6-893c-9f83d9942fe3\") " pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.173673 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.180460 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.312239 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-wlv7x"] Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.312874 5045 generic.go:334] "Generic (PLEG): container finished" podID="5055ae4f-3f89-4770-830f-0486ee87709f" containerID="e256fda05dbc9ba68182176daf859eb373e91a0000e664bebd787c72c3836dd1" exitCode=0 Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.312916 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" event={"ID":"5055ae4f-3f89-4770-830f-0486ee87709f","Type":"ContainerDied","Data":"e256fda05dbc9ba68182176daf859eb373e91a0000e664bebd787c72c3836dd1"} Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.312942 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" event={"ID":"5055ae4f-3f89-4770-830f-0486ee87709f","Type":"ContainerStarted","Data":"3e5289e66525150e2224f081fea0a18565c5b8232ddc2aa1ccf783b8d04fb5f5"} Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.556338 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6d43-account-create-update-kmw94"] Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.620154 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-dbmm2"] Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.631369 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-9751-account-create-update-mt6kc"] Nov 25 23:17:35 crc kubenswrapper[5045]: W1125 23:17:35.644562 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a371173_44b3_46c2_ad41_31b7387aca8a.slice/crio-5c72b6ce3b53a93d4b0fe1403c1cb855556747f92f4b303ddc4afa312c219213 WatchSource:0}: Error finding container 5c72b6ce3b53a93d4b0fe1403c1cb855556747f92f4b303ddc4afa312c219213: Status 404 returned error can't find the container with id 5c72b6ce3b53a93d4b0fe1403c1cb855556747f92f4b303ddc4afa312c219213 Nov 25 23:17:35 crc kubenswrapper[5045]: W1125 23:17:35.644847 5045 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c12eb36_5101_42cd_85f1_b9cc0ff4dc89.slice/crio-d8df878f5a6ee106fb119f0f2d546331340c167cec4a65e8f5b531777ca36df5 WatchSource:0}: Error finding container d8df878f5a6ee106fb119f0f2d546331340c167cec4a65e8f5b531777ca36df5: Status 404 returned error can't find the container with id d8df878f5a6ee106fb119f0f2d546331340c167cec4a65e8f5b531777ca36df5 Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.783460 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-2hc8p"] Nov 25 23:17:35 crc kubenswrapper[5045]: W1125 23:17:35.788148 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod416caee6_d98c_4f85_a3a7_e23594648a25.slice/crio-4932c376a6f250cf34118351019317bdff6399967acaa5cdc3946ad8b49282f3 WatchSource:0}: Error finding container 4932c376a6f250cf34118351019317bdff6399967acaa5cdc3946ad8b49282f3: Status 404 returned error can't find the container with id 4932c376a6f250cf34118351019317bdff6399967acaa5cdc3946ad8b49282f3 Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.844569 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-6szwq"] Nov 25 23:17:35 crc kubenswrapper[5045]: I1125 23:17:35.882403 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8c0a-account-create-update-65smx"] Nov 25 23:17:35 crc kubenswrapper[5045]: W1125 23:17:35.901322 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56361182_254c_43f6_893c_9f83d9942fe3.slice/crio-54aac8615c084e48c26d87a5ad2ee7c63c4fe2247f501c3d492ac079bf8d6855 WatchSource:0}: Error finding container 54aac8615c084e48c26d87a5ad2ee7c63c4fe2247f501c3d492ac079bf8d6855: Status 404 returned error can't find the container with id 54aac8615c084e48c26d87a5ad2ee7c63c4fe2247f501c3d492ac079bf8d6855 Nov 25 23:17:35 crc kubenswrapper[5045]: W1125 23:17:35.901816 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podddb0f925_b7f9_4daa_ad41_64b73c18a6cc.slice/crio-f971ae6fcac12566ec92cdf9fd933619384f65f8999d0015f881cf4c42e43adf WatchSource:0}: Error finding container f971ae6fcac12566ec92cdf9fd933619384f65f8999d0015f881cf4c42e43adf: Status 404 returned error can't find the container with id f971ae6fcac12566ec92cdf9fd933619384f65f8999d0015f881cf4c42e43adf Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.334168 5045 generic.go:334] "Generic (PLEG): container finished" podID="1a79af51-25bb-4e3f-a735-c67a4bc01360" containerID="410aaad66169913c92b452e5a56fe7085e2bfc6b8ec920800a3e41f635fe543f" exitCode=0 Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.334241 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-wlv7x" event={"ID":"1a79af51-25bb-4e3f-a735-c67a4bc01360","Type":"ContainerDied","Data":"410aaad66169913c92b452e5a56fe7085e2bfc6b8ec920800a3e41f635fe543f"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.334267 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-wlv7x" event={"ID":"1a79af51-25bb-4e3f-a735-c67a4bc01360","Type":"ContainerStarted","Data":"f5793975b07eeb61b7670195a3f7e5bd84ff13f1f1f8cb5026bacfa128aece2e"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.342636 5045 generic.go:334] "Generic (PLEG): container finished" 
podID="6c12eb36-5101-42cd-85f1-b9cc0ff4dc89" containerID="f0ae5b9351cc0125384d0dbe3a2d09b21cc4e2bc66b54532f3936f8881dae6df" exitCode=0 Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.342806 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dbmm2" event={"ID":"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89","Type":"ContainerDied","Data":"f0ae5b9351cc0125384d0dbe3a2d09b21cc4e2bc66b54532f3936f8881dae6df"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.342846 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dbmm2" event={"ID":"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89","Type":"ContainerStarted","Data":"d8df878f5a6ee106fb119f0f2d546331340c167cec4a65e8f5b531777ca36df5"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.350259 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-6szwq" event={"ID":"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc","Type":"ContainerStarted","Data":"f971ae6fcac12566ec92cdf9fd933619384f65f8999d0015f881cf4c42e43adf"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.355823 5045 generic.go:334] "Generic (PLEG): container finished" podID="2a371173-44b3-46c2-ad41-31b7387aca8a" containerID="cf07e355747c9268cd065bfe7115f250d1e52bef25c3b22cadab9634532ed660" exitCode=0 Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.355901 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-9751-account-create-update-mt6kc" event={"ID":"2a371173-44b3-46c2-ad41-31b7387aca8a","Type":"ContainerDied","Data":"cf07e355747c9268cd065bfe7115f250d1e52bef25c3b22cadab9634532ed660"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.355934 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-9751-account-create-update-mt6kc" event={"ID":"2a371173-44b3-46c2-ad41-31b7387aca8a","Type":"ContainerStarted","Data":"5c72b6ce3b53a93d4b0fe1403c1cb855556747f92f4b303ddc4afa312c219213"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.361336 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" event={"ID":"5055ae4f-3f89-4770-830f-0486ee87709f","Type":"ContainerStarted","Data":"b2645a5e64c645dc30be18d75e252a66962927ec3df855aeb2b5123aae296645"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.362008 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.366432 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8c0a-account-create-update-65smx" event={"ID":"56361182-254c-43f6-893c-9f83d9942fe3","Type":"ContainerStarted","Data":"a736b048d86b6c834f984d731e342c161c26aaeb8ab6c7cfee488c12fbd6cf98"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.366471 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8c0a-account-create-update-65smx" event={"ID":"56361182-254c-43f6-893c-9f83d9942fe3","Type":"ContainerStarted","Data":"54aac8615c084e48c26d87a5ad2ee7c63c4fe2247f501c3d492ac079bf8d6855"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.370459 5045 generic.go:334] "Generic (PLEG): container finished" podID="416caee6-d98c-4f85-a3a7-e23594648a25" containerID="344cec385ed3c28abf262abbe66459da36fe46ecdec1e1a5d7e0a97f4607f020" exitCode=0 Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.370528 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2hc8p" 
event={"ID":"416caee6-d98c-4f85-a3a7-e23594648a25","Type":"ContainerDied","Data":"344cec385ed3c28abf262abbe66459da36fe46ecdec1e1a5d7e0a97f4607f020"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.370570 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2hc8p" event={"ID":"416caee6-d98c-4f85-a3a7-e23594648a25","Type":"ContainerStarted","Data":"4932c376a6f250cf34118351019317bdff6399967acaa5cdc3946ad8b49282f3"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.373691 5045 generic.go:334] "Generic (PLEG): container finished" podID="4ce2c763-4353-4914-bc30-b4737ca1ffcf" containerID="2e633e2246b417362f8104c5a5e2bc6b247de7d4432f5cd917063516ba7f409f" exitCode=0 Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.373751 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6d43-account-create-update-kmw94" event={"ID":"4ce2c763-4353-4914-bc30-b4737ca1ffcf","Type":"ContainerDied","Data":"2e633e2246b417362f8104c5a5e2bc6b247de7d4432f5cd917063516ba7f409f"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.373776 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6d43-account-create-update-kmw94" event={"ID":"4ce2c763-4353-4914-bc30-b4737ca1ffcf","Type":"ContainerStarted","Data":"912e6fa4ff6465632fe4775b35836afa623cfffde8d88c8c40f758e9dcff8245"} Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.434656 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" podStartSLOduration=3.434640303 podStartE2EDuration="3.434640303s" podCreationTimestamp="2025-11-25 23:17:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:17:36.429539926 +0000 UTC m=+1112.787199038" watchObservedRunningTime="2025-11-25 23:17:36.434640303 +0000 UTC m=+1112.792299415" Nov 25 23:17:36 crc kubenswrapper[5045]: I1125 23:17:36.464911 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8c0a-account-create-update-65smx" podStartSLOduration=2.464893742 podStartE2EDuration="2.464893742s" podCreationTimestamp="2025-11-25 23:17:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:17:36.460592488 +0000 UTC m=+1112.818251600" watchObservedRunningTime="2025-11-25 23:17:36.464893742 +0000 UTC m=+1112.822552854" Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.382079 5045 generic.go:334] "Generic (PLEG): container finished" podID="56361182-254c-43f6-893c-9f83d9942fe3" containerID="a736b048d86b6c834f984d731e342c161c26aaeb8ab6c7cfee488c12fbd6cf98" exitCode=0 Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.382607 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8c0a-account-create-update-65smx" event={"ID":"56361182-254c-43f6-893c-9f83d9942fe3","Type":"ContainerDied","Data":"a736b048d86b6c834f984d731e342c161c26aaeb8ab6c7cfee488c12fbd6cf98"} Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.782939 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.850346 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a371173-44b3-46c2-ad41-31b7387aca8a-operator-scripts\") pod \"2a371173-44b3-46c2-ad41-31b7387aca8a\" (UID: \"2a371173-44b3-46c2-ad41-31b7387aca8a\") " Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.850599 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44r8l\" (UniqueName: \"kubernetes.io/projected/2a371173-44b3-46c2-ad41-31b7387aca8a-kube-api-access-44r8l\") pod \"2a371173-44b3-46c2-ad41-31b7387aca8a\" (UID: \"2a371173-44b3-46c2-ad41-31b7387aca8a\") " Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.850946 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a371173-44b3-46c2-ad41-31b7387aca8a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2a371173-44b3-46c2-ad41-31b7387aca8a" (UID: "2a371173-44b3-46c2-ad41-31b7387aca8a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.874058 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a371173-44b3-46c2-ad41-31b7387aca8a-kube-api-access-44r8l" (OuterVolumeSpecName: "kube-api-access-44r8l") pod "2a371173-44b3-46c2-ad41-31b7387aca8a" (UID: "2a371173-44b3-46c2-ad41-31b7387aca8a"). InnerVolumeSpecName "kube-api-access-44r8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.953079 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44r8l\" (UniqueName: \"kubernetes.io/projected/2a371173-44b3-46c2-ad41-31b7387aca8a-kube-api-access-44r8l\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.953112 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a371173-44b3-46c2-ad41-31b7387aca8a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:37 crc kubenswrapper[5045]: I1125 23:17:37.997850 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.008133 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.019135 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.046176 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156022 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kgpf\" (UniqueName: \"kubernetes.io/projected/416caee6-d98c-4f85-a3a7-e23594648a25-kube-api-access-6kgpf\") pod \"416caee6-d98c-4f85-a3a7-e23594648a25\" (UID: \"416caee6-d98c-4f85-a3a7-e23594648a25\") " Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156116 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce2c763-4353-4914-bc30-b4737ca1ffcf-operator-scripts\") pod \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\" (UID: \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\") " Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156146 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/416caee6-d98c-4f85-a3a7-e23594648a25-operator-scripts\") pod \"416caee6-d98c-4f85-a3a7-e23594648a25\" (UID: \"416caee6-d98c-4f85-a3a7-e23594648a25\") " Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156224 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbz7r\" (UniqueName: \"kubernetes.io/projected/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-kube-api-access-hbz7r\") pod \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\" (UID: \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\") " Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156279 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a79af51-25bb-4e3f-a735-c67a4bc01360-operator-scripts\") pod \"1a79af51-25bb-4e3f-a735-c67a4bc01360\" (UID: \"1a79af51-25bb-4e3f-a735-c67a4bc01360\") " Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156303 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tm7tb\" (UniqueName: \"kubernetes.io/projected/4ce2c763-4353-4914-bc30-b4737ca1ffcf-kube-api-access-tm7tb\") pod \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\" (UID: \"4ce2c763-4353-4914-bc30-b4737ca1ffcf\") " Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156375 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-operator-scripts\") pod \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\" (UID: \"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89\") " Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156414 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h72r8\" (UniqueName: \"kubernetes.io/projected/1a79af51-25bb-4e3f-a735-c67a4bc01360-kube-api-access-h72r8\") pod \"1a79af51-25bb-4e3f-a735-c67a4bc01360\" (UID: \"1a79af51-25bb-4e3f-a735-c67a4bc01360\") " Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.156926 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a79af51-25bb-4e3f-a735-c67a4bc01360-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1a79af51-25bb-4e3f-a735-c67a4bc01360" (UID: "1a79af51-25bb-4e3f-a735-c67a4bc01360"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.157013 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/416caee6-d98c-4f85-a3a7-e23594648a25-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "416caee6-d98c-4f85-a3a7-e23594648a25" (UID: "416caee6-d98c-4f85-a3a7-e23594648a25"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.157189 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6c12eb36-5101-42cd-85f1-b9cc0ff4dc89" (UID: "6c12eb36-5101-42cd-85f1-b9cc0ff4dc89"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.157443 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ce2c763-4353-4914-bc30-b4737ca1ffcf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4ce2c763-4353-4914-bc30-b4737ca1ffcf" (UID: "4ce2c763-4353-4914-bc30-b4737ca1ffcf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.157458 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a79af51-25bb-4e3f-a735-c67a4bc01360-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.157471 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.157480 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/416caee6-d98c-4f85-a3a7-e23594648a25-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.160116 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ce2c763-4353-4914-bc30-b4737ca1ffcf-kube-api-access-tm7tb" (OuterVolumeSpecName: "kube-api-access-tm7tb") pod "4ce2c763-4353-4914-bc30-b4737ca1ffcf" (UID: "4ce2c763-4353-4914-bc30-b4737ca1ffcf"). InnerVolumeSpecName "kube-api-access-tm7tb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.160149 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a79af51-25bb-4e3f-a735-c67a4bc01360-kube-api-access-h72r8" (OuterVolumeSpecName: "kube-api-access-h72r8") pod "1a79af51-25bb-4e3f-a735-c67a4bc01360" (UID: "1a79af51-25bb-4e3f-a735-c67a4bc01360"). InnerVolumeSpecName "kube-api-access-h72r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.160626 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-kube-api-access-hbz7r" (OuterVolumeSpecName: "kube-api-access-hbz7r") pod "6c12eb36-5101-42cd-85f1-b9cc0ff4dc89" (UID: "6c12eb36-5101-42cd-85f1-b9cc0ff4dc89"). InnerVolumeSpecName "kube-api-access-hbz7r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.161112 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/416caee6-d98c-4f85-a3a7-e23594648a25-kube-api-access-6kgpf" (OuterVolumeSpecName: "kube-api-access-6kgpf") pod "416caee6-d98c-4f85-a3a7-e23594648a25" (UID: "416caee6-d98c-4f85-a3a7-e23594648a25"). InnerVolumeSpecName "kube-api-access-6kgpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.260768 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce2c763-4353-4914-bc30-b4737ca1ffcf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.261305 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbz7r\" (UniqueName: \"kubernetes.io/projected/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89-kube-api-access-hbz7r\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.261372 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tm7tb\" (UniqueName: \"kubernetes.io/projected/4ce2c763-4353-4914-bc30-b4737ca1ffcf-kube-api-access-tm7tb\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.261386 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h72r8\" (UniqueName: \"kubernetes.io/projected/1a79af51-25bb-4e3f-a735-c67a4bc01360-kube-api-access-h72r8\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.261396 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kgpf\" (UniqueName: \"kubernetes.io/projected/416caee6-d98c-4f85-a3a7-e23594648a25-kube-api-access-6kgpf\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.390366 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-9751-account-create-update-mt6kc" event={"ID":"2a371173-44b3-46c2-ad41-31b7387aca8a","Type":"ContainerDied","Data":"5c72b6ce3b53a93d4b0fe1403c1cb855556747f92f4b303ddc4afa312c219213"} Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.390403 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c72b6ce3b53a93d4b0fe1403c1cb855556747f92f4b303ddc4afa312c219213" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.391570 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-9751-account-create-update-mt6kc" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.392527 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2hc8p" event={"ID":"416caee6-d98c-4f85-a3a7-e23594648a25","Type":"ContainerDied","Data":"4932c376a6f250cf34118351019317bdff6399967acaa5cdc3946ad8b49282f3"} Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.392569 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4932c376a6f250cf34118351019317bdff6399967acaa5cdc3946ad8b49282f3" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.392624 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2hc8p" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.404465 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6d43-account-create-update-kmw94" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.407384 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-wlv7x" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.424233 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6d43-account-create-update-kmw94" event={"ID":"4ce2c763-4353-4914-bc30-b4737ca1ffcf","Type":"ContainerDied","Data":"912e6fa4ff6465632fe4775b35836afa623cfffde8d88c8c40f758e9dcff8245"} Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.424268 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="912e6fa4ff6465632fe4775b35836afa623cfffde8d88c8c40f758e9dcff8245" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.424279 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-wlv7x" event={"ID":"1a79af51-25bb-4e3f-a735-c67a4bc01360","Type":"ContainerDied","Data":"f5793975b07eeb61b7670195a3f7e5bd84ff13f1f1f8cb5026bacfa128aece2e"} Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.424288 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5793975b07eeb61b7670195a3f7e5bd84ff13f1f1f8cb5026bacfa128aece2e" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.427533 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dbmm2" Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.428618 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dbmm2" event={"ID":"6c12eb36-5101-42cd-85f1-b9cc0ff4dc89","Type":"ContainerDied","Data":"d8df878f5a6ee106fb119f0f2d546331340c167cec4a65e8f5b531777ca36df5"} Nov 25 23:17:38 crc kubenswrapper[5045]: I1125 23:17:38.428662 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8df878f5a6ee106fb119f0f2d546331340c167cec4a65e8f5b531777ca36df5" Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.253336 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.416156 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56361182-254c-43f6-893c-9f83d9942fe3-operator-scripts\") pod \"56361182-254c-43f6-893c-9f83d9942fe3\" (UID: \"56361182-254c-43f6-893c-9f83d9942fe3\") " Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.416240 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g9vf\" (UniqueName: \"kubernetes.io/projected/56361182-254c-43f6-893c-9f83d9942fe3-kube-api-access-6g9vf\") pod \"56361182-254c-43f6-893c-9f83d9942fe3\" (UID: \"56361182-254c-43f6-893c-9f83d9942fe3\") " Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.417596 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56361182-254c-43f6-893c-9f83d9942fe3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "56361182-254c-43f6-893c-9f83d9942fe3" (UID: "56361182-254c-43f6-893c-9f83d9942fe3"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.422757 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56361182-254c-43f6-893c-9f83d9942fe3-kube-api-access-6g9vf" (OuterVolumeSpecName: "kube-api-access-6g9vf") pod "56361182-254c-43f6-893c-9f83d9942fe3" (UID: "56361182-254c-43f6-893c-9f83d9942fe3"). InnerVolumeSpecName "kube-api-access-6g9vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.493462 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8c0a-account-create-update-65smx" event={"ID":"56361182-254c-43f6-893c-9f83d9942fe3","Type":"ContainerDied","Data":"54aac8615c084e48c26d87a5ad2ee7c63c4fe2247f501c3d492ac079bf8d6855"} Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.493507 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54aac8615c084e48c26d87a5ad2ee7c63c4fe2247f501c3d492ac079bf8d6855" Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.493507 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8c0a-account-create-update-65smx" Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.495681 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-6szwq" event={"ID":"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc","Type":"ContainerStarted","Data":"15776c56598a7a3b12a1a16d02731d891849d73c9d23ea55f5cda4c9f25bb029"} Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.518627 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-6szwq" podStartSLOduration=2.257933725 podStartE2EDuration="7.518597711s" podCreationTimestamp="2025-11-25 23:17:34 +0000 UTC" firstStartedPulling="2025-11-25 23:17:35.904617342 +0000 UTC m=+1112.262276454" lastFinishedPulling="2025-11-25 23:17:41.165281328 +0000 UTC m=+1117.522940440" observedRunningTime="2025-11-25 23:17:41.515750429 +0000 UTC m=+1117.873409571" watchObservedRunningTime="2025-11-25 23:17:41.518597711 +0000 UTC m=+1117.876256863" Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.519547 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56361182-254c-43f6-893c-9f83d9942fe3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:41 crc kubenswrapper[5045]: I1125 23:17:41.519586 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g9vf\" (UniqueName: \"kubernetes.io/projected/56361182-254c-43f6-893c-9f83d9942fe3-kube-api-access-6g9vf\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.133044 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.240149 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n2lvl"] Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.240487 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-n2lvl" podUID="e88e13a0-7cae-4c31-86c0-19526b581713" containerName="dnsmasq-dns" containerID="cri-o://0defa8acef3dc500e2170b1690f5377efa0ca3e374a3512c3fcd405d10d412b7" gracePeriod=10 Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.523726 5045 generic.go:334] "Generic (PLEG): 
container finished" podID="e88e13a0-7cae-4c31-86c0-19526b581713" containerID="0defa8acef3dc500e2170b1690f5377efa0ca3e374a3512c3fcd405d10d412b7" exitCode=0 Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.523880 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n2lvl" event={"ID":"e88e13a0-7cae-4c31-86c0-19526b581713","Type":"ContainerDied","Data":"0defa8acef3dc500e2170b1690f5377efa0ca3e374a3512c3fcd405d10d412b7"} Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.758327 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.887209 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lvxz\" (UniqueName: \"kubernetes.io/projected/e88e13a0-7cae-4c31-86c0-19526b581713-kube-api-access-7lvxz\") pod \"e88e13a0-7cae-4c31-86c0-19526b581713\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.887301 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-dns-svc\") pod \"e88e13a0-7cae-4c31-86c0-19526b581713\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.887408 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-nb\") pod \"e88e13a0-7cae-4c31-86c0-19526b581713\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.887438 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-sb\") pod \"e88e13a0-7cae-4c31-86c0-19526b581713\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.887474 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-config\") pod \"e88e13a0-7cae-4c31-86c0-19526b581713\" (UID: \"e88e13a0-7cae-4c31-86c0-19526b581713\") " Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.894286 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e88e13a0-7cae-4c31-86c0-19526b581713-kube-api-access-7lvxz" (OuterVolumeSpecName: "kube-api-access-7lvxz") pod "e88e13a0-7cae-4c31-86c0-19526b581713" (UID: "e88e13a0-7cae-4c31-86c0-19526b581713"). InnerVolumeSpecName "kube-api-access-7lvxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.946068 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e88e13a0-7cae-4c31-86c0-19526b581713" (UID: "e88e13a0-7cae-4c31-86c0-19526b581713"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.950237 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-config" (OuterVolumeSpecName: "config") pod "e88e13a0-7cae-4c31-86c0-19526b581713" (UID: "e88e13a0-7cae-4c31-86c0-19526b581713"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.954041 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e88e13a0-7cae-4c31-86c0-19526b581713" (UID: "e88e13a0-7cae-4c31-86c0-19526b581713"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.959326 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e88e13a0-7cae-4c31-86c0-19526b581713" (UID: "e88e13a0-7cae-4c31-86c0-19526b581713"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.990167 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lvxz\" (UniqueName: \"kubernetes.io/projected/e88e13a0-7cae-4c31-86c0-19526b581713-kube-api-access-7lvxz\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.990280 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.990304 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.990350 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:44 crc kubenswrapper[5045]: I1125 23:17:44.990396 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e88e13a0-7cae-4c31-86c0-19526b581713-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:45 crc kubenswrapper[5045]: I1125 23:17:45.538648 5045 generic.go:334] "Generic (PLEG): container finished" podID="ddb0f925-b7f9-4daa-ad41-64b73c18a6cc" containerID="15776c56598a7a3b12a1a16d02731d891849d73c9d23ea55f5cda4c9f25bb029" exitCode=0 Nov 25 23:17:45 crc kubenswrapper[5045]: I1125 23:17:45.538742 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-6szwq" event={"ID":"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc","Type":"ContainerDied","Data":"15776c56598a7a3b12a1a16d02731d891849d73c9d23ea55f5cda4c9f25bb029"} Nov 25 23:17:45 crc kubenswrapper[5045]: I1125 23:17:45.542861 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n2lvl" event={"ID":"e88e13a0-7cae-4c31-86c0-19526b581713","Type":"ContainerDied","Data":"31340781b71139b00284718cd78940e945c92beebba989b768167832215f346b"} Nov 25 23:17:45 crc 
kubenswrapper[5045]: I1125 23:17:45.542903 5045 scope.go:117] "RemoveContainer" containerID="0defa8acef3dc500e2170b1690f5377efa0ca3e374a3512c3fcd405d10d412b7" Nov 25 23:17:45 crc kubenswrapper[5045]: I1125 23:17:45.543021 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-n2lvl" Nov 25 23:17:45 crc kubenswrapper[5045]: I1125 23:17:45.575125 5045 scope.go:117] "RemoveContainer" containerID="d068bc3a96f8d4fb768f6ed6b046dc1daa0fe387a379eaa01fdbd15f1e23e5c5" Nov 25 23:17:45 crc kubenswrapper[5045]: I1125 23:17:45.601515 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n2lvl"] Nov 25 23:17:45 crc kubenswrapper[5045]: I1125 23:17:45.619619 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n2lvl"] Nov 25 23:17:46 crc kubenswrapper[5045]: I1125 23:17:46.413777 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e88e13a0-7cae-4c31-86c0-19526b581713" path="/var/lib/kubelet/pods/e88e13a0-7cae-4c31-86c0-19526b581713/volumes" Nov 25 23:17:46 crc kubenswrapper[5045]: I1125 23:17:46.875991 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.029079 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-config-data\") pod \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.029148 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-combined-ca-bundle\") pod \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.029262 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghx76\" (UniqueName: \"kubernetes.io/projected/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-kube-api-access-ghx76\") pod \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\" (UID: \"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc\") " Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.034624 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-kube-api-access-ghx76" (OuterVolumeSpecName: "kube-api-access-ghx76") pod "ddb0f925-b7f9-4daa-ad41-64b73c18a6cc" (UID: "ddb0f925-b7f9-4daa-ad41-64b73c18a6cc"). InnerVolumeSpecName "kube-api-access-ghx76". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.066523 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ddb0f925-b7f9-4daa-ad41-64b73c18a6cc" (UID: "ddb0f925-b7f9-4daa-ad41-64b73c18a6cc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.082899 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-config-data" (OuterVolumeSpecName: "config-data") pod "ddb0f925-b7f9-4daa-ad41-64b73c18a6cc" (UID: "ddb0f925-b7f9-4daa-ad41-64b73c18a6cc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.131318 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghx76\" (UniqueName: \"kubernetes.io/projected/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-kube-api-access-ghx76\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.131497 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.131601 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.566581 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-6szwq" event={"ID":"ddb0f925-b7f9-4daa-ad41-64b73c18a6cc","Type":"ContainerDied","Data":"f971ae6fcac12566ec92cdf9fd933619384f65f8999d0015f881cf4c42e43adf"} Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.567026 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f971ae6fcac12566ec92cdf9fd933619384f65f8999d0015f881cf4c42e43adf" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.566694 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-6szwq" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864298 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-2w9r5"] Nov 25 23:17:47 crc kubenswrapper[5045]: E1125 23:17:47.864689 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c12eb36-5101-42cd-85f1-b9cc0ff4dc89" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864728 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c12eb36-5101-42cd-85f1-b9cc0ff4dc89" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: E1125 23:17:47.864741 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce2c763-4353-4914-bc30-b4737ca1ffcf" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864750 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce2c763-4353-4914-bc30-b4737ca1ffcf" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: E1125 23:17:47.864770 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="416caee6-d98c-4f85-a3a7-e23594648a25" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864777 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="416caee6-d98c-4f85-a3a7-e23594648a25" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: E1125 23:17:47.864784 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb0f925-b7f9-4daa-ad41-64b73c18a6cc" containerName="keystone-db-sync" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864791 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb0f925-b7f9-4daa-ad41-64b73c18a6cc" containerName="keystone-db-sync" Nov 25 23:17:47 crc kubenswrapper[5045]: E1125 23:17:47.864799 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56361182-254c-43f6-893c-9f83d9942fe3" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864805 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="56361182-254c-43f6-893c-9f83d9942fe3" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: E1125 23:17:47.864824 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e88e13a0-7cae-4c31-86c0-19526b581713" containerName="init" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864831 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e88e13a0-7cae-4c31-86c0-19526b581713" containerName="init" Nov 25 23:17:47 crc kubenswrapper[5045]: E1125 23:17:47.864843 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a79af51-25bb-4e3f-a735-c67a4bc01360" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864872 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a79af51-25bb-4e3f-a735-c67a4bc01360" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: E1125 23:17:47.864885 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a371173-44b3-46c2-ad41-31b7387aca8a" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864892 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a371173-44b3-46c2-ad41-31b7387aca8a" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: 
E1125 23:17:47.864901 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e88e13a0-7cae-4c31-86c0-19526b581713" containerName="dnsmasq-dns" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.864908 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e88e13a0-7cae-4c31-86c0-19526b581713" containerName="dnsmasq-dns" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865100 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="e88e13a0-7cae-4c31-86c0-19526b581713" containerName="dnsmasq-dns" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865111 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="416caee6-d98c-4f85-a3a7-e23594648a25" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865122 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddb0f925-b7f9-4daa-ad41-64b73c18a6cc" containerName="keystone-db-sync" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865135 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a79af51-25bb-4e3f-a735-c67a4bc01360" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865147 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ce2c763-4353-4914-bc30-b4737ca1ffcf" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865158 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="56361182-254c-43f6-893c-9f83d9942fe3" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865174 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c12eb36-5101-42cd-85f1-b9cc0ff4dc89" containerName="mariadb-database-create" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865187 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a371173-44b3-46c2-ad41-31b7387aca8a" containerName="mariadb-account-create-update" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.865859 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.869009 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.869223 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fgjn7" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.869357 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.869501 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.869657 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.877420 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-2w9r5"] Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.899695 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67795cd9-tq9jm"] Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.912591 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.953763 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-config\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.953823 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-nb\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.953850 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-dns-svc\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.954042 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp4fg\" (UniqueName: \"kubernetes.io/projected/7515af80-0bc8-49dd-8375-e5617d8bdbc0-kube-api-access-qp4fg\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.954189 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-sb\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:47 crc kubenswrapper[5045]: I1125 23:17:47.973354 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-tq9jm"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.061874 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-7fqmg"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.062938 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064218 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-nb\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064250 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-dns-svc\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064281 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-config-data\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064306 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc5g8\" (UniqueName: \"kubernetes.io/projected/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-kube-api-access-nc5g8\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064336 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp4fg\" (UniqueName: \"kubernetes.io/projected/7515af80-0bc8-49dd-8375-e5617d8bdbc0-kube-api-access-qp4fg\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064377 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-combined-ca-bundle\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064396 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-sb\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064420 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-fernet-keys\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064454 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-scripts\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " 
pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064479 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-credential-keys\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.064498 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-config\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.065495 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-config\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.065978 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.066142 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-js5d4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.066222 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-nb\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.066772 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-sb\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.068628 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.068675 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-dns-svc\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.086424 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-lr4xs"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.087611 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.092782 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7jh9q" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.093015 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.098468 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.105805 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-7fqmg"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.116624 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp4fg\" (UniqueName: \"kubernetes.io/projected/7515af80-0bc8-49dd-8375-e5617d8bdbc0-kube-api-access-qp4fg\") pod \"dnsmasq-dns-67795cd9-tq9jm\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.151981 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-lr4xs"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168206 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-scripts\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168256 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zj5c\" (UniqueName: \"kubernetes.io/projected/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-kube-api-access-2zj5c\") pod \"neutron-db-sync-7fqmg\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168283 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/483c4f92-701f-4dae-a00a-3a3d753d8c17-etc-machine-id\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168301 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-credential-keys\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168317 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-config-data\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168346 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-config\") pod \"neutron-db-sync-7fqmg\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " 
pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168365 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-db-sync-config-data\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168394 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-config-data\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168416 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc5g8\" (UniqueName: \"kubernetes.io/projected/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-kube-api-access-nc5g8\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168456 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-combined-ca-bundle\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168473 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-combined-ca-bundle\") pod \"neutron-db-sync-7fqmg\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168494 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-combined-ca-bundle\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168519 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-fernet-keys\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168535 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-scripts\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.168551 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvslz\" (UniqueName: \"kubernetes.io/projected/483c4f92-701f-4dae-a00a-3a3d753d8c17-kube-api-access-zvslz\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 
crc kubenswrapper[5045]: I1125 23:17:48.175427 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-credential-keys\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.179407 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-config-data\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.180309 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-combined-ca-bundle\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.183878 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-n58vj"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.192112 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-scripts\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.199565 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.203232 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-b5hms" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.204084 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc5g8\" (UniqueName: \"kubernetes.io/projected/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-kube-api-access-nc5g8\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.204883 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.205350 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.205959 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-fernet-keys\") pod \"keystone-bootstrap-2w9r5\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.218243 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-n58vj"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.256956 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-gtpx4"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.258057 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.263133 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-njg9c" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.263330 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.268063 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-tq9jm"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.268633 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.276486 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-gtpx4"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.287835 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-db-sync-config-data\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.287890 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-config\") pod \"neutron-db-sync-7fqmg\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.287913 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-db-sync-config-data\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.287947 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x47jp\" (UniqueName: \"kubernetes.io/projected/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-kube-api-access-x47jp\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288014 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-combined-ca-bundle\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288031 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-scripts\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288049 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-combined-ca-bundle\") pod \"neutron-db-sync-7fqmg\" (UID: 
\"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288081 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mhr7\" (UniqueName: \"kubernetes.io/projected/889492c0-db80-43f6-9a4f-36292139f3df-kube-api-access-5mhr7\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288108 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-scripts\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288124 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvslz\" (UniqueName: \"kubernetes.io/projected/483c4f92-701f-4dae-a00a-3a3d753d8c17-kube-api-access-zvslz\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288164 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-config-data\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288179 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-combined-ca-bundle\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288197 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zj5c\" (UniqueName: \"kubernetes.io/projected/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-kube-api-access-2zj5c\") pod \"neutron-db-sync-7fqmg\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288219 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/483c4f92-701f-4dae-a00a-3a3d753d8c17-etc-machine-id\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288237 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-config-data\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288253 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-combined-ca-bundle\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " 
pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.288268 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/889492c0-db80-43f6-9a4f-36292139f3df-logs\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.294666 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-47bq9"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.296003 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.303249 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-scripts\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.303851 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/483c4f92-701f-4dae-a00a-3a3d753d8c17-etc-machine-id\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.309680 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvslz\" (UniqueName: \"kubernetes.io/projected/483c4f92-701f-4dae-a00a-3a3d753d8c17-kube-api-access-zvslz\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.313212 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-config-data\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.314136 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-combined-ca-bundle\") pod \"neutron-db-sync-7fqmg\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.315268 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-config\") pod \"neutron-db-sync-7fqmg\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.319470 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-combined-ca-bundle\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.326315 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-db-sync-config-data\") pod \"cinder-db-sync-lr4xs\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.332014 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-47bq9"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.334783 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zj5c\" (UniqueName: \"kubernetes.io/projected/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-kube-api-access-2zj5c\") pod \"neutron-db-sync-7fqmg\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.355529 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.357614 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.359696 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.359987 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.366608 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.389944 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-combined-ca-bundle\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.389981 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjtvj\" (UniqueName: \"kubernetes.io/projected/82d48403-8197-427a-863b-e61cf561bb37-kube-api-access-hjtvj\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390002 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/889492c0-db80-43f6-9a4f-36292139f3df-logs\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390025 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-db-sync-config-data\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390042 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390066 
5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-run-httpd\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390084 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x47jp\" (UniqueName: \"kubernetes.io/projected/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-kube-api-access-x47jp\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390119 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm8nq\" (UniqueName: \"kubernetes.io/projected/d264a9e9-964e-4764-bb10-466754e4e77a-kube-api-access-cm8nq\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390138 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-scripts\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390158 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-config\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390172 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390191 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-scripts\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390217 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mhr7\" (UniqueName: \"kubernetes.io/projected/889492c0-db80-43f6-9a4f-36292139f3df-kube-api-access-5mhr7\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390232 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-log-httpd\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390249 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390264 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-config-data\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390290 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390322 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-config-data\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390338 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-combined-ca-bundle\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.390361 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.394195 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.395126 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/889492c0-db80-43f6-9a4f-36292139f3df-logs\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.400426 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-combined-ca-bundle\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.407415 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-db-sync-config-data\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.411844 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-combined-ca-bundle\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.414045 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-config-data\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.419904 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.421432 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-scripts\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.424060 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x47jp\" (UniqueName: \"kubernetes.io/projected/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-kube-api-access-x47jp\") pod \"barbican-db-sync-gtpx4\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.426428 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mhr7\" (UniqueName: \"kubernetes.io/projected/889492c0-db80-43f6-9a4f-36292139f3df-kube-api-access-5mhr7\") pod \"placement-db-sync-n58vj\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.491495 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.491892 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492285 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492367 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjtvj\" (UniqueName: \"kubernetes.io/projected/82d48403-8197-427a-863b-e61cf561bb37-kube-api-access-hjtvj\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492418 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492439 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-run-httpd\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492488 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm8nq\" (UniqueName: \"kubernetes.io/projected/d264a9e9-964e-4764-bb10-466754e4e77a-kube-api-access-cm8nq\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492507 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-scripts\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492526 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-config\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492540 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492571 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-log-httpd\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492586 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.492602 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-config-data\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.493204 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-run-httpd\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.504392 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.510507 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-config-data\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.510845 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-config\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.510892 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.511116 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-log-httpd\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.511571 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.511792 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.513880 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm8nq\" (UniqueName: \"kubernetes.io/projected/d264a9e9-964e-4764-bb10-466754e4e77a-kube-api-access-cm8nq\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.514706 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-scripts\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.517875 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.523129 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjtvj\" (UniqueName: \"kubernetes.io/projected/82d48403-8197-427a-863b-e61cf561bb37-kube-api-access-hjtvj\") pod \"dnsmasq-dns-5b6dbdb6f5-47bq9\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.587197 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-n58vj" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.682834 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.714156 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.725884 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:17:48 crc kubenswrapper[5045]: W1125 23:17:48.874002 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7515af80_0bc8_49dd_8375_e5617d8bdbc0.slice/crio-0b9d6af86860c3c989333672b75b9c70f9fc7ec022ac61166b0e9af53880894a WatchSource:0}: Error finding container 0b9d6af86860c3c989333672b75b9c70f9fc7ec022ac61166b0e9af53880894a: Status 404 returned error can't find the container with id 0b9d6af86860c3c989333672b75b9c70f9fc7ec022ac61166b0e9af53880894a Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.878900 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-tq9jm"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.967880 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-lr4xs"] Nov 25 23:17:48 crc kubenswrapper[5045]: I1125 23:17:48.976852 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-7fqmg"] Nov 25 23:17:48 crc kubenswrapper[5045]: W1125 23:17:48.986086 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9344bf81_aa30_41d5_8dd4_49a7f32a0cfb.slice/crio-8c35ae51005d884bdb81bccc764435f813034df5cb9df5c6a6c0f35bb70f825e WatchSource:0}: Error finding container 8c35ae51005d884bdb81bccc764435f813034df5cb9df5c6a6c0f35bb70f825e: Status 404 returned error can't find the container with id 8c35ae51005d884bdb81bccc764435f813034df5cb9df5c6a6c0f35bb70f825e Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.091743 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-2w9r5"] Nov 25 23:17:49 crc kubenswrapper[5045]: W1125 23:17:49.100148 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2bfdb44_64b1_44c6_83d9_15f23ffe6af3.slice/crio-1dbcbcc47e3106b7a558ea7877fe19ed62de38ca08c6fee5c24f409b857a8355 WatchSource:0}: Error finding container 1dbcbcc47e3106b7a558ea7877fe19ed62de38ca08c6fee5c24f409b857a8355: Status 404 returned error can't find the container with id 1dbcbcc47e3106b7a558ea7877fe19ed62de38ca08c6fee5c24f409b857a8355 Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.336516 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-gtpx4"] Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.346603 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-47bq9"] Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.357562 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-n58vj"] Nov 25 23:17:49 crc kubenswrapper[5045]: W1125 23:17:49.368887 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod889492c0_db80_43f6_9a4f_36292139f3df.slice/crio-b47fa67a8a311d6a93d29782358d0a9754ae3149831de254cb89b5db4e35b27e WatchSource:0}: Error finding container b47fa67a8a311d6a93d29782358d0a9754ae3149831de254cb89b5db4e35b27e: Status 404 returned error can't find the container with id b47fa67a8a311d6a93d29782358d0a9754ae3149831de254cb89b5db4e35b27e Nov 25 23:17:49 crc kubenswrapper[5045]: W1125 23:17:49.557811 5045 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd264a9e9_964e_4764_bb10_466754e4e77a.slice/crio-7cc38552084b60007175c36f80e498f6cfd23b5112c9ece206e8398c46d20552 WatchSource:0}: Error finding container 7cc38552084b60007175c36f80e498f6cfd23b5112c9ece206e8398c46d20552: Status 404 returned error can't find the container with id 7cc38552084b60007175c36f80e498f6cfd23b5112c9ece206e8398c46d20552 Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.560453 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.595296 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2w9r5" event={"ID":"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3","Type":"ContainerStarted","Data":"1dbcbcc47e3106b7a558ea7877fe19ed62de38ca08c6fee5c24f409b857a8355"} Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.598354 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gtpx4" event={"ID":"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89","Type":"ContainerStarted","Data":"137a57d00199e4f16370f45f66c8c0f622c8b7336f89882e2c30a85cb75cba44"} Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.600664 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerStarted","Data":"7cc38552084b60007175c36f80e498f6cfd23b5112c9ece206e8398c46d20552"} Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.603405 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7fqmg" event={"ID":"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb","Type":"ContainerStarted","Data":"8c35ae51005d884bdb81bccc764435f813034df5cb9df5c6a6c0f35bb70f825e"} Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.605573 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67795cd9-tq9jm" event={"ID":"7515af80-0bc8-49dd-8375-e5617d8bdbc0","Type":"ContainerStarted","Data":"0b9d6af86860c3c989333672b75b9c70f9fc7ec022ac61166b0e9af53880894a"} Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.606951 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" event={"ID":"82d48403-8197-427a-863b-e61cf561bb37","Type":"ContainerStarted","Data":"a07c421f049fda9873cb2be2ae47e276a2e6a77d70d35e13a26550a19d2637d0"} Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.608395 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-n58vj" event={"ID":"889492c0-db80-43f6-9a4f-36292139f3df","Type":"ContainerStarted","Data":"b47fa67a8a311d6a93d29782358d0a9754ae3149831de254cb89b5db4e35b27e"} Nov 25 23:17:49 crc kubenswrapper[5045]: I1125 23:17:49.610190 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lr4xs" event={"ID":"483c4f92-701f-4dae-a00a-3a3d753d8c17","Type":"ContainerStarted","Data":"6657ae27fc98f1c822c387706514d87f9bd2d2bc73b9b968a29d604d9a32d140"} Nov 25 23:17:50 crc kubenswrapper[5045]: I1125 23:17:50.000540 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:17:51 crc kubenswrapper[5045]: I1125 23:17:51.630318 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7fqmg" event={"ID":"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb","Type":"ContainerStarted","Data":"314cf21683fc34621c621510ec7c149462274c3133fefe34402ab7acf029d3ae"} Nov 25 23:17:51 crc kubenswrapper[5045]: I1125 23:17:51.638340 
5045 generic.go:334] "Generic (PLEG): container finished" podID="7515af80-0bc8-49dd-8375-e5617d8bdbc0" containerID="e2bd40f81e2871fdbe3f13adefb77ee96024d091879ef42df8b124d91e22da8e" exitCode=0 Nov 25 23:17:51 crc kubenswrapper[5045]: I1125 23:17:51.638438 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67795cd9-tq9jm" event={"ID":"7515af80-0bc8-49dd-8375-e5617d8bdbc0","Type":"ContainerDied","Data":"e2bd40f81e2871fdbe3f13adefb77ee96024d091879ef42df8b124d91e22da8e"} Nov 25 23:17:51 crc kubenswrapper[5045]: I1125 23:17:51.643879 5045 generic.go:334] "Generic (PLEG): container finished" podID="82d48403-8197-427a-863b-e61cf561bb37" containerID="abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349" exitCode=0 Nov 25 23:17:51 crc kubenswrapper[5045]: I1125 23:17:51.643985 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" event={"ID":"82d48403-8197-427a-863b-e61cf561bb37","Type":"ContainerDied","Data":"abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349"} Nov 25 23:17:51 crc kubenswrapper[5045]: I1125 23:17:51.673280 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-7fqmg" podStartSLOduration=3.673260519 podStartE2EDuration="3.673260519s" podCreationTimestamp="2025-11-25 23:17:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:17:51.659196305 +0000 UTC m=+1128.016855417" watchObservedRunningTime="2025-11-25 23:17:51.673260519 +0000 UTC m=+1128.030919641" Nov 25 23:17:51 crc kubenswrapper[5045]: I1125 23:17:51.682064 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2w9r5" event={"ID":"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3","Type":"ContainerStarted","Data":"c7cff6530f101a05ce21cecd01136de0bd933cc5cf7d052324eae5f192674055"} Nov 25 23:17:51 crc kubenswrapper[5045]: I1125 23:17:51.787817 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-2w9r5" podStartSLOduration=4.787795759 podStartE2EDuration="4.787795759s" podCreationTimestamp="2025-11-25 23:17:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:17:51.748068729 +0000 UTC m=+1128.105727841" watchObservedRunningTime="2025-11-25 23:17:51.787795759 +0000 UTC m=+1128.145454871" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.095500 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.161999 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-nb\") pod \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.162093 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp4fg\" (UniqueName: \"kubernetes.io/projected/7515af80-0bc8-49dd-8375-e5617d8bdbc0-kube-api-access-qp4fg\") pod \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.162173 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-sb\") pod \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.162305 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-dns-svc\") pod \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.162345 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-config\") pod \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\" (UID: \"7515af80-0bc8-49dd-8375-e5617d8bdbc0\") " Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.169878 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7515af80-0bc8-49dd-8375-e5617d8bdbc0-kube-api-access-qp4fg" (OuterVolumeSpecName: "kube-api-access-qp4fg") pod "7515af80-0bc8-49dd-8375-e5617d8bdbc0" (UID: "7515af80-0bc8-49dd-8375-e5617d8bdbc0"). InnerVolumeSpecName "kube-api-access-qp4fg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.184633 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7515af80-0bc8-49dd-8375-e5617d8bdbc0" (UID: "7515af80-0bc8-49dd-8375-e5617d8bdbc0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.186948 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7515af80-0bc8-49dd-8375-e5617d8bdbc0" (UID: "7515af80-0bc8-49dd-8375-e5617d8bdbc0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.189245 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7515af80-0bc8-49dd-8375-e5617d8bdbc0" (UID: "7515af80-0bc8-49dd-8375-e5617d8bdbc0"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.190570 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-config" (OuterVolumeSpecName: "config") pod "7515af80-0bc8-49dd-8375-e5617d8bdbc0" (UID: "7515af80-0bc8-49dd-8375-e5617d8bdbc0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.263901 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.263933 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.263952 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.263967 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp4fg\" (UniqueName: \"kubernetes.io/projected/7515af80-0bc8-49dd-8375-e5617d8bdbc0-kube-api-access-qp4fg\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.263977 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7515af80-0bc8-49dd-8375-e5617d8bdbc0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.691396 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67795cd9-tq9jm" event={"ID":"7515af80-0bc8-49dd-8375-e5617d8bdbc0","Type":"ContainerDied","Data":"0b9d6af86860c3c989333672b75b9c70f9fc7ec022ac61166b0e9af53880894a"} Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.691410 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67795cd9-tq9jm" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.691774 5045 scope.go:117] "RemoveContainer" containerID="e2bd40f81e2871fdbe3f13adefb77ee96024d091879ef42df8b124d91e22da8e" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.693651 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" event={"ID":"82d48403-8197-427a-863b-e61cf561bb37","Type":"ContainerStarted","Data":"21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3"} Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.693982 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.752158 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" podStartSLOduration=4.75214104 podStartE2EDuration="4.75214104s" podCreationTimestamp="2025-11-25 23:17:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:17:52.720787589 +0000 UTC m=+1129.078446701" watchObservedRunningTime="2025-11-25 23:17:52.75214104 +0000 UTC m=+1129.109800152" Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.775102 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-tq9jm"] Nov 25 23:17:52 crc kubenswrapper[5045]: I1125 23:17:52.780942 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67795cd9-tq9jm"] Nov 25 23:17:54 crc kubenswrapper[5045]: I1125 23:17:54.410242 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7515af80-0bc8-49dd-8375-e5617d8bdbc0" path="/var/lib/kubelet/pods/7515af80-0bc8-49dd-8375-e5617d8bdbc0/volumes" Nov 25 23:17:54 crc kubenswrapper[5045]: I1125 23:17:54.729190 5045 generic.go:334] "Generic (PLEG): container finished" podID="b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" containerID="c7cff6530f101a05ce21cecd01136de0bd933cc5cf7d052324eae5f192674055" exitCode=0 Nov 25 23:17:54 crc kubenswrapper[5045]: I1125 23:17:54.729232 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2w9r5" event={"ID":"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3","Type":"ContainerDied","Data":"c7cff6530f101a05ce21cecd01136de0bd933cc5cf7d052324eae5f192674055"} Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.587431 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.648274 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-combined-ca-bundle\") pod \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.648410 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-credential-keys\") pod \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.648466 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-fernet-keys\") pod \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.648509 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nc5g8\" (UniqueName: \"kubernetes.io/projected/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-kube-api-access-nc5g8\") pod \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.648568 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-config-data\") pod \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.648592 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-scripts\") pod \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\" (UID: \"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3\") " Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.658829 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-scripts" (OuterVolumeSpecName: "scripts") pod "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" (UID: "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.659986 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-kube-api-access-nc5g8" (OuterVolumeSpecName: "kube-api-access-nc5g8") pod "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" (UID: "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3"). InnerVolumeSpecName "kube-api-access-nc5g8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.663300 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" (UID: "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.664839 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" (UID: "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.676833 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" (UID: "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.682465 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-config-data" (OuterVolumeSpecName: "config-data") pod "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" (UID: "b2bfdb44-64b1-44c6-83d9-15f23ffe6af3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.750020 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nc5g8\" (UniqueName: \"kubernetes.io/projected/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-kube-api-access-nc5g8\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.750050 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.750060 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.750068 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.750076 5045 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.750085 5045 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.752235 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2w9r5" event={"ID":"b2bfdb44-64b1-44c6-83d9-15f23ffe6af3","Type":"ContainerDied","Data":"1dbcbcc47e3106b7a558ea7877fe19ed62de38ca08c6fee5c24f409b857a8355"} Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.752274 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1dbcbcc47e3106b7a558ea7877fe19ed62de38ca08c6fee5c24f409b857a8355" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.752313 5045 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-2w9r5" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.812985 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-2w9r5"] Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.819381 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-2w9r5"] Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.917158 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-lht5b"] Nov 25 23:17:56 crc kubenswrapper[5045]: E1125 23:17:56.917997 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" containerName="keystone-bootstrap" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.918166 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" containerName="keystone-bootstrap" Nov 25 23:17:56 crc kubenswrapper[5045]: E1125 23:17:56.918309 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7515af80-0bc8-49dd-8375-e5617d8bdbc0" containerName="init" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.918427 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7515af80-0bc8-49dd-8375-e5617d8bdbc0" containerName="init" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.918781 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" containerName="keystone-bootstrap" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.918902 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="7515af80-0bc8-49dd-8375-e5617d8bdbc0" containerName="init" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.919781 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.923547 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fgjn7" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.923878 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.924214 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.924585 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.924925 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.932292 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lht5b"] Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.962784 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-credential-keys\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.962998 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-combined-ca-bundle\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.963140 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-fernet-keys\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.963232 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgwf7\" (UniqueName: \"kubernetes.io/projected/abd9d902-3ba4-49d6-900e-9411bdd8b222-kube-api-access-xgwf7\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.963311 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-scripts\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:56 crc kubenswrapper[5045]: I1125 23:17:56.963444 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-config-data\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.065926 5045 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-credential-keys\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.066155 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-combined-ca-bundle\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.066241 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-fernet-keys\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.066298 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgwf7\" (UniqueName: \"kubernetes.io/projected/abd9d902-3ba4-49d6-900e-9411bdd8b222-kube-api-access-xgwf7\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.066328 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-scripts\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.066454 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-config-data\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.070516 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-combined-ca-bundle\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.070781 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-fernet-keys\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.071525 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-scripts\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.073190 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-config-data\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") 
" pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.083418 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgwf7\" (UniqueName: \"kubernetes.io/projected/abd9d902-3ba4-49d6-900e-9411bdd8b222-kube-api-access-xgwf7\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.097094 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-credential-keys\") pod \"keystone-bootstrap-lht5b\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:57 crc kubenswrapper[5045]: I1125 23:17:57.287187 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:17:58 crc kubenswrapper[5045]: I1125 23:17:58.405668 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2bfdb44-64b1-44c6-83d9-15f23ffe6af3" path="/var/lib/kubelet/pods/b2bfdb44-64b1-44c6-83d9-15f23ffe6af3/volumes" Nov 25 23:17:58 crc kubenswrapper[5045]: I1125 23:17:58.715841 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:17:58 crc kubenswrapper[5045]: I1125 23:17:58.787407 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-6m45j"] Nov 25 23:17:58 crc kubenswrapper[5045]: I1125 23:17:58.787727 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="dnsmasq-dns" containerID="cri-o://b2645a5e64c645dc30be18d75e252a66962927ec3df855aeb2b5123aae296645" gracePeriod=10 Nov 25 23:17:59 crc kubenswrapper[5045]: I1125 23:17:59.131897 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.122:5353: connect: connection refused" Nov 25 23:17:59 crc kubenswrapper[5045]: I1125 23:17:59.781370 5045 generic.go:334] "Generic (PLEG): container finished" podID="5055ae4f-3f89-4770-830f-0486ee87709f" containerID="b2645a5e64c645dc30be18d75e252a66962927ec3df855aeb2b5123aae296645" exitCode=0 Nov 25 23:17:59 crc kubenswrapper[5045]: I1125 23:17:59.781471 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" event={"ID":"5055ae4f-3f89-4770-830f-0486ee87709f","Type":"ContainerDied","Data":"b2645a5e64c645dc30be18d75e252a66962927ec3df855aeb2b5123aae296645"} Nov 25 23:18:00 crc kubenswrapper[5045]: I1125 23:18:00.541131 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:18:00 crc kubenswrapper[5045]: I1125 23:18:00.541185 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused"
Nov 25 23:18:04 crc kubenswrapper[5045]: E1125 23:18:04.811264 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified"
Nov 25 23:18:04 crc kubenswrapper[5045]: E1125 23:18:04.811615 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5mhr7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-n58vj_openstack(889492c0-db80-43f6-9a4f-36292139f3df): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 23:18:04 crc kubenswrapper[5045]: E1125 23:18:04.812763 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-n58vj" podUID="889492c0-db80-43f6-9a4f-36292139f3df"
Nov 25 23:18:04 crc kubenswrapper[5045]: E1125 23:18:04.826157 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-n58vj" podUID="889492c0-db80-43f6-9a4f-36292139f3df"
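[Editor annotation] The entries above show a CRI image pull being cancelled (ErrImagePull) and the pod worker immediately re-queueing the pod with ImagePullBackOff. Between retries the kubelet waits out a doubling backoff before attempting the pull again; the sketch below illustrates that schedule in Go, assuming the upstream kubelet defaults of a 10s base and 300s cap (function and variable names here are illustrative, not kubelet's own).

package main

import (
	"fmt"
	"time"
)

// backoffSchedule returns the successive delays of a doubling
// backoff with an upper limit -- the pattern behind the repeated
// "Back-off pulling image" messages: each failed pull roughly
// doubles the wait, up to the cap. Base/cap values are assumed
// from upstream kubelet defaults.
func backoffSchedule(base, limit time.Duration, n int) []time.Duration {
	delays := make([]time.Duration, 0, n)
	d := base
	for i := 0; i < n; i++ {
		delays = append(delays, d)
		d *= 2
		if d > limit {
			d = limit
		}
	}
	return delays
}

func main() {
	// Prints [10s 20s 40s 1m20s 2m40s 5m0s 5m0s]
	fmt.Println(backoffSchedule(10*time.Second, 300*time.Second, 7))
}

Once a pull does complete, the backoff entry is cleared and the container starts; the barbican-db-sync podStartSLOduration entry later in this log records exactly that via its firstStartedPulling/lastFinishedPulling timestamps.
Nov 25 23:18:09 crc kubenswrapper[5045]: I1125 23:18:09.131842 5045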
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.122:5353: i/o timeout" Nov 25 23:18:12 crc kubenswrapper[5045]: I1125 23:18:12.888555 5045 generic.go:334] "Generic (PLEG): container finished" podID="9344bf81-aa30-41d5-8dd4-49a7f32a0cfb" containerID="314cf21683fc34621c621510ec7c149462274c3133fefe34402ab7acf029d3ae" exitCode=0 Nov 25 23:18:12 crc kubenswrapper[5045]: I1125 23:18:12.888700 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7fqmg" event={"ID":"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb","Type":"ContainerDied","Data":"314cf21683fc34621c621510ec7c149462274c3133fefe34402ab7acf029d3ae"} Nov 25 23:18:13 crc kubenswrapper[5045]: E1125 23:18:13.113844 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 25 23:18:13 crc kubenswrapper[5045]: E1125 23:18:13.113994 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zvslz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},Re
startPolicy:nil,} start failed in pod cinder-db-sync-lr4xs_openstack(483c4f92-701f-4dae-a00a-3a3d753d8c17): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:18:13 crc kubenswrapper[5045]: E1125 23:18:13.115341 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-lr4xs" podUID="483c4f92-701f-4dae-a00a-3a3d753d8c17" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.159521 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.258308 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-config\") pod \"5055ae4f-3f89-4770-830f-0486ee87709f\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.258378 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-nb\") pod \"5055ae4f-3f89-4770-830f-0486ee87709f\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.258544 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-dns-svc\") pod \"5055ae4f-3f89-4770-830f-0486ee87709f\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.258582 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-sb\") pod \"5055ae4f-3f89-4770-830f-0486ee87709f\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.258615 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dpm8\" (UniqueName: \"kubernetes.io/projected/5055ae4f-3f89-4770-830f-0486ee87709f-kube-api-access-9dpm8\") pod \"5055ae4f-3f89-4770-830f-0486ee87709f\" (UID: \"5055ae4f-3f89-4770-830f-0486ee87709f\") " Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.272980 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5055ae4f-3f89-4770-830f-0486ee87709f-kube-api-access-9dpm8" (OuterVolumeSpecName: "kube-api-access-9dpm8") pod "5055ae4f-3f89-4770-830f-0486ee87709f" (UID: "5055ae4f-3f89-4770-830f-0486ee87709f"). InnerVolumeSpecName "kube-api-access-9dpm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.357537 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5055ae4f-3f89-4770-830f-0486ee87709f" (UID: "5055ae4f-3f89-4770-830f-0486ee87709f"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.360564 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.360857 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dpm8\" (UniqueName: \"kubernetes.io/projected/5055ae4f-3f89-4770-830f-0486ee87709f-kube-api-access-9dpm8\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.369290 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5055ae4f-3f89-4770-830f-0486ee87709f" (UID: "5055ae4f-3f89-4770-830f-0486ee87709f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.393309 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-config" (OuterVolumeSpecName: "config") pod "5055ae4f-3f89-4770-830f-0486ee87709f" (UID: "5055ae4f-3f89-4770-830f-0486ee87709f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.400354 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5055ae4f-3f89-4770-830f-0486ee87709f" (UID: "5055ae4f-3f89-4770-830f-0486ee87709f"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.463247 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.463310 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.463332 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5055ae4f-3f89-4770-830f-0486ee87709f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.542389 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.550332 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lht5b"] Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.904382 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gtpx4" event={"ID":"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89","Type":"ContainerStarted","Data":"ad5850ba451f04b8f864d1f1bbdc82c5226df462911276b55f5a45a2baa92f95"} Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.908025 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerStarted","Data":"d7f1317ff1ae899968a4be7a67d042903b769b8e9101e48303f747617094e99d"} Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.913769 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lht5b" event={"ID":"abd9d902-3ba4-49d6-900e-9411bdd8b222","Type":"ContainerStarted","Data":"24840b389f34f85a70f3bf69b11dac4081793ecf6272c219857be45fbd35d044"} Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.913823 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lht5b" event={"ID":"abd9d902-3ba4-49d6-900e-9411bdd8b222","Type":"ContainerStarted","Data":"2ceb4d29063780986e4cc2131a775748a18761bb931425d2660586a2216bdc80"} Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.921506 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.924611 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" event={"ID":"5055ae4f-3f89-4770-830f-0486ee87709f","Type":"ContainerDied","Data":"3e5289e66525150e2224f081fea0a18565c5b8232ddc2aa1ccf783b8d04fb5f5"} Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.924686 5045 scope.go:117] "RemoveContainer" containerID="b2645a5e64c645dc30be18d75e252a66962927ec3df855aeb2b5123aae296645" Nov 25 23:18:13 crc kubenswrapper[5045]: E1125 23:18:13.932089 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-lr4xs" podUID="483c4f92-701f-4dae-a00a-3a3d753d8c17" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.939304 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-gtpx4" podStartSLOduration=2.207060026 podStartE2EDuration="25.939273709s" podCreationTimestamp="2025-11-25 23:17:48 +0000 UTC" firstStartedPulling="2025-11-25 23:17:49.350203394 +0000 UTC m=+1125.707862507" lastFinishedPulling="2025-11-25 23:18:13.082417078 +0000 UTC m=+1149.440076190" observedRunningTime="2025-11-25 23:18:13.9250375 +0000 UTC m=+1150.282696652" watchObservedRunningTime="2025-11-25 23:18:13.939273709 +0000 UTC m=+1150.296932821" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.972879 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-lht5b" podStartSLOduration=17.972857764 podStartE2EDuration="17.972857764s" podCreationTimestamp="2025-11-25 23:17:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:13.971522956 +0000 UTC m=+1150.329182108" watchObservedRunningTime="2025-11-25 23:18:13.972857764 +0000 UTC m=+1150.330516876" Nov 25 23:18:13 crc kubenswrapper[5045]: I1125 23:18:13.977586 5045 scope.go:117] "RemoveContainer" containerID="e256fda05dbc9ba68182176daf859eb373e91a0000e664bebd787c72c3836dd1" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.006770 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-6m45j"] Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.019005 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-554567b4f7-6m45j"] Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.134197 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-554567b4f7-6m45j" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.122:5353: i/o timeout" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.293148 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.379580 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zj5c\" (UniqueName: \"kubernetes.io/projected/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-kube-api-access-2zj5c\") pod \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.379746 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-combined-ca-bundle\") pod \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.379807 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-config\") pod \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\" (UID: \"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb\") " Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.384044 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-kube-api-access-2zj5c" (OuterVolumeSpecName: "kube-api-access-2zj5c") pod "9344bf81-aa30-41d5-8dd4-49a7f32a0cfb" (UID: "9344bf81-aa30-41d5-8dd4-49a7f32a0cfb"). InnerVolumeSpecName "kube-api-access-2zj5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.412926 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-config" (OuterVolumeSpecName: "config") pod "9344bf81-aa30-41d5-8dd4-49a7f32a0cfb" (UID: "9344bf81-aa30-41d5-8dd4-49a7f32a0cfb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.427679 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9344bf81-aa30-41d5-8dd4-49a7f32a0cfb" (UID: "9344bf81-aa30-41d5-8dd4-49a7f32a0cfb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.438009 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" path="/var/lib/kubelet/pods/5055ae4f-3f89-4770-830f-0486ee87709f/volumes" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.482152 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zj5c\" (UniqueName: \"kubernetes.io/projected/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-kube-api-access-2zj5c\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.482191 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.482203 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.957525 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7fqmg" event={"ID":"9344bf81-aa30-41d5-8dd4-49a7f32a0cfb","Type":"ContainerDied","Data":"8c35ae51005d884bdb81bccc764435f813034df5cb9df5c6a6c0f35bb70f825e"} Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.957908 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c35ae51005d884bdb81bccc764435f813034df5cb9df5c6a6c0f35bb70f825e" Nov 25 23:18:14 crc kubenswrapper[5045]: I1125 23:18:14.958019 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7fqmg" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.094905 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-69528"] Nov 25 23:18:15 crc kubenswrapper[5045]: E1125 23:18:15.095722 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="init" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.095799 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="init" Nov 25 23:18:15 crc kubenswrapper[5045]: E1125 23:18:15.095866 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="dnsmasq-dns" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.095917 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="dnsmasq-dns" Nov 25 23:18:15 crc kubenswrapper[5045]: E1125 23:18:15.096002 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9344bf81-aa30-41d5-8dd4-49a7f32a0cfb" containerName="neutron-db-sync" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.096055 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="9344bf81-aa30-41d5-8dd4-49a7f32a0cfb" containerName="neutron-db-sync" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.096263 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5055ae4f-3f89-4770-830f-0486ee87709f" containerName="dnsmasq-dns" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.096328 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="9344bf81-aa30-41d5-8dd4-49a7f32a0cfb" containerName="neutron-db-sync" Nov 25 23:18:15 
crc kubenswrapper[5045]: I1125 23:18:15.097218 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.107233 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-69528"] Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.171894 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-64bf94568b-qzrfh"] Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.173223 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.180371 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.180940 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.181099 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.181523 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-js5d4" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190564 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-httpd-config\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190607 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-dns-svc\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190634 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fmqv\" (UniqueName: \"kubernetes.io/projected/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-kube-api-access-4fmqv\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190667 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-combined-ca-bundle\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190687 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-nb\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190723 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-config\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190740 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-ovndb-tls-certs\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190768 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwfqw\" (UniqueName: \"kubernetes.io/projected/55785523-a194-47c5-ad63-5955cc73241e-kube-api-access-mwfqw\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190797 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-config\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.190821 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-sb\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.194343 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64bf94568b-qzrfh"] Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292123 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwfqw\" (UniqueName: \"kubernetes.io/projected/55785523-a194-47c5-ad63-5955cc73241e-kube-api-access-mwfqw\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292451 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-config\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292486 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-sb\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292524 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-httpd-config\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292561 5045 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-dns-svc\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292599 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fmqv\" (UniqueName: \"kubernetes.io/projected/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-kube-api-access-4fmqv\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292640 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-combined-ca-bundle\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292663 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-nb\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292694 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-config\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.292731 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-ovndb-tls-certs\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.293500 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-config\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.294100 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-nb\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.294832 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-sb\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.295175 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-dns-svc\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.296401 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-ovndb-tls-certs\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.297320 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-httpd-config\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.297745 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-combined-ca-bundle\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.298730 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-config\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.311841 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwfqw\" (UniqueName: \"kubernetes.io/projected/55785523-a194-47c5-ad63-5955cc73241e-kube-api-access-mwfqw\") pod \"neutron-64bf94568b-qzrfh\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.319009 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fmqv\" (UniqueName: \"kubernetes.io/projected/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-kube-api-access-4fmqv\") pod \"dnsmasq-dns-5f66db59b9-69528\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.446784 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.511324 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:15 crc kubenswrapper[5045]: I1125 23:18:15.976506 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerStarted","Data":"bdec37632121d3535f3dfdb6b0610b5374b313598ecb6fcf11b0ddb7a5f880eb"} Nov 25 23:18:16 crc kubenswrapper[5045]: I1125 23:18:16.025184 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-69528"] Nov 25 23:18:16 crc kubenswrapper[5045]: W1125 23:18:16.030753 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b4a1c6b_ec7c_49bc_b4dd_617c4caf2b74.slice/crio-5133d587603829532ef62ecd41e668f8d5f7c63565b38cc625e256fa5a82ab28 WatchSource:0}: Error finding container 5133d587603829532ef62ecd41e668f8d5f7c63565b38cc625e256fa5a82ab28: Status 404 returned error can't find the container with id 5133d587603829532ef62ecd41e668f8d5f7c63565b38cc625e256fa5a82ab28 Nov 25 23:18:16 crc kubenswrapper[5045]: I1125 23:18:16.193208 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64bf94568b-qzrfh"] Nov 25 23:18:16 crc kubenswrapper[5045]: W1125 23:18:16.202613 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55785523_a194_47c5_ad63_5955cc73241e.slice/crio-54398baf8d6207d0d1d80fc3d8a177563d05f00ff5a3c1891363a1c874539125 WatchSource:0}: Error finding container 54398baf8d6207d0d1d80fc3d8a177563d05f00ff5a3c1891363a1c874539125: Status 404 returned error can't find the container with id 54398baf8d6207d0d1d80fc3d8a177563d05f00ff5a3c1891363a1c874539125 Nov 25 23:18:16 crc kubenswrapper[5045]: I1125 23:18:16.986374 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bf94568b-qzrfh" event={"ID":"55785523-a194-47c5-ad63-5955cc73241e","Type":"ContainerStarted","Data":"a09aea8b0c2a3c94c914667901dfd3ee6b1d501d9295f8c98796f23ab77d197d"} Nov 25 23:18:16 crc kubenswrapper[5045]: I1125 23:18:16.986738 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:16 crc kubenswrapper[5045]: I1125 23:18:16.986753 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bf94568b-qzrfh" event={"ID":"55785523-a194-47c5-ad63-5955cc73241e","Type":"ContainerStarted","Data":"6bf2d5e4a5c4e921dddf93c952627bd84c8581f90c30f336ea367eca29affe72"} Nov 25 23:18:16 crc kubenswrapper[5045]: I1125 23:18:16.986765 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bf94568b-qzrfh" event={"ID":"55785523-a194-47c5-ad63-5955cc73241e","Type":"ContainerStarted","Data":"54398baf8d6207d0d1d80fc3d8a177563d05f00ff5a3c1891363a1c874539125"} Nov 25 23:18:16 crc kubenswrapper[5045]: I1125 23:18:16.989067 5045 generic.go:334] "Generic (PLEG): container finished" podID="fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89" containerID="ad5850ba451f04b8f864d1f1bbdc82c5226df462911276b55f5a45a2baa92f95" exitCode=0 Nov 25 23:18:16 crc kubenswrapper[5045]: I1125 23:18:16.989167 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gtpx4" event={"ID":"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89","Type":"ContainerDied","Data":"ad5850ba451f04b8f864d1f1bbdc82c5226df462911276b55f5a45a2baa92f95"} Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.001383 5045 generic.go:334] "Generic (PLEG): container 
finished" podID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" containerID="b1e0e21681339bc3ebf86fa757fdc3aeaf79beb20895a929d78e5fedf25ec126" exitCode=0 Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.001454 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-69528" event={"ID":"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74","Type":"ContainerDied","Data":"b1e0e21681339bc3ebf86fa757fdc3aeaf79beb20895a929d78e5fedf25ec126"} Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.001516 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-69528" event={"ID":"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74","Type":"ContainerStarted","Data":"5133d587603829532ef62ecd41e668f8d5f7c63565b38cc625e256fa5a82ab28"} Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.005115 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-64bf94568b-qzrfh" podStartSLOduration=2.005099856 podStartE2EDuration="2.005099856s" podCreationTimestamp="2025-11-25 23:18:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:17.004706515 +0000 UTC m=+1153.362365627" watchObservedRunningTime="2025-11-25 23:18:17.005099856 +0000 UTC m=+1153.362758968" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.586222 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5db8cdc695-2sz2g"] Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.588651 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.594140 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.594528 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.604078 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5db8cdc695-2sz2g"] Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.742117 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-httpd-config\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.742353 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-public-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.742468 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-combined-ca-bundle\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.742490 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-ovndb-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.742529 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phgtg\" (UniqueName: \"kubernetes.io/projected/6970371c-e072-49ea-97b5-a6bed28d5372-kube-api-access-phgtg\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.742547 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-internal-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.742647 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-config\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.843813 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-public-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.843891 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-combined-ca-bundle\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.843919 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-ovndb-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.843951 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phgtg\" (UniqueName: \"kubernetes.io/projected/6970371c-e072-49ea-97b5-a6bed28d5372-kube-api-access-phgtg\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.843975 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-internal-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.844053 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-config\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.844140 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-httpd-config\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.850570 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-combined-ca-bundle\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.850775 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-ovndb-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.851122 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-public-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.852044 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-internal-tls-certs\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.852597 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-config\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.856008 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6970371c-e072-49ea-97b5-a6bed28d5372-httpd-config\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.861069 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phgtg\" (UniqueName: \"kubernetes.io/projected/6970371c-e072-49ea-97b5-a6bed28d5372-kube-api-access-phgtg\") pod \"neutron-5db8cdc695-2sz2g\" (UID: \"6970371c-e072-49ea-97b5-a6bed28d5372\") " pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:17 crc kubenswrapper[5045]: I1125 23:18:17.918053 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:18 crc kubenswrapper[5045]: I1125 23:18:18.011275 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-69528" event={"ID":"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74","Type":"ContainerStarted","Data":"ba5c95998aee863daad136e4adbdff3a75bff32e0bb79706ddc25d0ddbaa6aa7"} Nov 25 23:18:18 crc kubenswrapper[5045]: I1125 23:18:18.011405 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:18 crc kubenswrapper[5045]: I1125 23:18:18.012634 5045 generic.go:334] "Generic (PLEG): container finished" podID="abd9d902-3ba4-49d6-900e-9411bdd8b222" containerID="24840b389f34f85a70f3bf69b11dac4081793ecf6272c219857be45fbd35d044" exitCode=0 Nov 25 23:18:18 crc kubenswrapper[5045]: I1125 23:18:18.012707 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lht5b" event={"ID":"abd9d902-3ba4-49d6-900e-9411bdd8b222","Type":"ContainerDied","Data":"24840b389f34f85a70f3bf69b11dac4081793ecf6272c219857be45fbd35d044"} Nov 25 23:18:18 crc kubenswrapper[5045]: I1125 23:18:18.034348 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f66db59b9-69528" podStartSLOduration=3.034329592 podStartE2EDuration="3.034329592s" podCreationTimestamp="2025-11-25 23:18:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:18.029158403 +0000 UTC m=+1154.386817525" watchObservedRunningTime="2025-11-25 23:18:18.034329592 +0000 UTC m=+1154.391988704" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.345493 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.464119 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.514418 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-combined-ca-bundle\") pod \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.514501 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x47jp\" (UniqueName: \"kubernetes.io/projected/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-kube-api-access-x47jp\") pod \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.514556 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-db-sync-config-data\") pod \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\" (UID: \"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.519250 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-kube-api-access-x47jp" (OuterVolumeSpecName: "kube-api-access-x47jp") pod "fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89" (UID: "fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89"). 
InnerVolumeSpecName "kube-api-access-x47jp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.519441 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89" (UID: "fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.550266 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89" (UID: "fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.629684 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-credential-keys\") pod \"abd9d902-3ba4-49d6-900e-9411bdd8b222\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.629950 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-config-data\") pod \"abd9d902-3ba4-49d6-900e-9411bdd8b222\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.630033 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-fernet-keys\") pod \"abd9d902-3ba4-49d6-900e-9411bdd8b222\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.633087 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-scripts\") pod \"abd9d902-3ba4-49d6-900e-9411bdd8b222\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.633179 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgwf7\" (UniqueName: \"kubernetes.io/projected/abd9d902-3ba4-49d6-900e-9411bdd8b222-kube-api-access-xgwf7\") pod \"abd9d902-3ba4-49d6-900e-9411bdd8b222\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.633223 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-combined-ca-bundle\") pod \"abd9d902-3ba4-49d6-900e-9411bdd8b222\" (UID: \"abd9d902-3ba4-49d6-900e-9411bdd8b222\") " Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.633918 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "abd9d902-3ba4-49d6-900e-9411bdd8b222" (UID: "abd9d902-3ba4-49d6-900e-9411bdd8b222"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.634127 5045 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.634147 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.634159 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x47jp\" (UniqueName: \"kubernetes.io/projected/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-kube-api-access-x47jp\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.634173 5045 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.634230 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "abd9d902-3ba4-49d6-900e-9411bdd8b222" (UID: "abd9d902-3ba4-49d6-900e-9411bdd8b222"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.636897 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abd9d902-3ba4-49d6-900e-9411bdd8b222-kube-api-access-xgwf7" (OuterVolumeSpecName: "kube-api-access-xgwf7") pod "abd9d902-3ba4-49d6-900e-9411bdd8b222" (UID: "abd9d902-3ba4-49d6-900e-9411bdd8b222"). InnerVolumeSpecName "kube-api-access-xgwf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.638415 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-scripts" (OuterVolumeSpecName: "scripts") pod "abd9d902-3ba4-49d6-900e-9411bdd8b222" (UID: "abd9d902-3ba4-49d6-900e-9411bdd8b222"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.663956 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-config-data" (OuterVolumeSpecName: "config-data") pod "abd9d902-3ba4-49d6-900e-9411bdd8b222" (UID: "abd9d902-3ba4-49d6-900e-9411bdd8b222"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.670465 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "abd9d902-3ba4-49d6-900e-9411bdd8b222" (UID: "abd9d902-3ba4-49d6-900e-9411bdd8b222"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.735333 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.735370 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgwf7\" (UniqueName: \"kubernetes.io/projected/abd9d902-3ba4-49d6-900e-9411bdd8b222-kube-api-access-xgwf7\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.735383 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.735394 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.735405 5045 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/abd9d902-3ba4-49d6-900e-9411bdd8b222-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:21 crc kubenswrapper[5045]: W1125 23:18:21.749402 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6970371c_e072_49ea_97b5_a6bed28d5372.slice/crio-85d62456d064ca5388f1c3f0d5370904c4663741ec3331bace74a6ac74ad8626 WatchSource:0}: Error finding container 85d62456d064ca5388f1c3f0d5370904c4663741ec3331bace74a6ac74ad8626: Status 404 returned error can't find the container with id 85d62456d064ca5388f1c3f0d5370904c4663741ec3331bace74a6ac74ad8626 Nov 25 23:18:21 crc kubenswrapper[5045]: I1125 23:18:21.750440 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5db8cdc695-2sz2g"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.071962 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gtpx4" event={"ID":"fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89","Type":"ContainerDied","Data":"137a57d00199e4f16370f45f66c8c0f622c8b7336f89882e2c30a85cb75cba44"} Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.072002 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="137a57d00199e4f16370f45f66c8c0f622c8b7336f89882e2c30a85cb75cba44" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.072309 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-gtpx4" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.084917 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerStarted","Data":"6c90ba0300d881d80fc270ac5bd982676a4eb5a0535df1f5587476ba47223757"} Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.087529 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lht5b" event={"ID":"abd9d902-3ba4-49d6-900e-9411bdd8b222","Type":"ContainerDied","Data":"2ceb4d29063780986e4cc2131a775748a18761bb931425d2660586a2216bdc80"} Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.087574 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ceb4d29063780986e4cc2131a775748a18761bb931425d2660586a2216bdc80" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.087541 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lht5b" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.089598 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-n58vj" event={"ID":"889492c0-db80-43f6-9a4f-36292139f3df","Type":"ContainerStarted","Data":"ab401352773cd5b2d5ca78e559ec38054346c749e815f0733ab5e4d3b18524a8"} Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.098039 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5db8cdc695-2sz2g" event={"ID":"6970371c-e072-49ea-97b5-a6bed28d5372","Type":"ContainerStarted","Data":"85d62456d064ca5388f1c3f0d5370904c4663741ec3331bace74a6ac74ad8626"} Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.128029 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-n58vj" podStartSLOduration=2.282409312 podStartE2EDuration="34.128001075s" podCreationTimestamp="2025-11-25 23:17:48 +0000 UTC" firstStartedPulling="2025-11-25 23:17:49.381270317 +0000 UTC m=+1125.738929429" lastFinishedPulling="2025-11-25 23:18:21.22686207 +0000 UTC m=+1157.584521192" observedRunningTime="2025-11-25 23:18:22.118060119 +0000 UTC m=+1158.475719271" watchObservedRunningTime="2025-11-25 23:18:22.128001075 +0000 UTC m=+1158.485660197" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.576845 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-68c57f7894-dq5jz"] Nov 25 23:18:22 crc kubenswrapper[5045]: E1125 23:18:22.577432 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abd9d902-3ba4-49d6-900e-9411bdd8b222" containerName="keystone-bootstrap" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.577445 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="abd9d902-3ba4-49d6-900e-9411bdd8b222" containerName="keystone-bootstrap" Nov 25 23:18:22 crc kubenswrapper[5045]: E1125 23:18:22.577460 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89" containerName="barbican-db-sync" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.577467 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89" containerName="barbican-db-sync" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.577645 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89" containerName="barbican-db-sync" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.577667 5045 
memory_manager.go:354] "RemoveStaleState removing state" podUID="abd9d902-3ba4-49d6-900e-9411bdd8b222" containerName="keystone-bootstrap" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.578292 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.583677 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.583898 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.583946 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.584116 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fgjn7" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.584292 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.599257 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.599579 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-f86f47df6-cktpw"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.600921 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.605622 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.605837 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-njg9c" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.605971 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.614962 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-68c57f7894-dq5jz"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650173 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fjxm\" (UniqueName: \"kubernetes.io/projected/e85b05a0-5d74-4df9-b09c-a68596f45b6e-kube-api-access-5fjxm\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650262 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/930a6fb5-dcf0-455c-97a7-5446766b0d01-logs\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650289 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-combined-ca-bundle\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: 
\"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650308 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-config-data\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650333 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-internal-tls-certs\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650367 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-combined-ca-bundle\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650385 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-config-data\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650406 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-config-data-custom\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650420 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-credential-keys\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650445 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-scripts\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650471 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-public-tls-certs\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650494 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-22zw7\" (UniqueName: \"kubernetes.io/projected/930a6fb5-dcf0-455c-97a7-5446766b0d01-kube-api-access-22zw7\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.650514 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-fernet-keys\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.658837 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-f86f47df6-cktpw"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.749415 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-679bb9cf9-plnhs"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.751868 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752729 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/930a6fb5-dcf0-455c-97a7-5446766b0d01-logs\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752772 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-combined-ca-bundle\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752795 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-config-data\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752819 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-internal-tls-certs\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752848 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-combined-ca-bundle\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752871 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-config-data\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " 
pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752890 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-config-data-custom\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752911 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-config-data-custom\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752928 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-credential-keys\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752956 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-scripts\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.752979 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-combined-ca-bundle\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.753000 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-config-data\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.753017 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-public-tls-certs\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.753042 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22zw7\" (UniqueName: \"kubernetes.io/projected/930a6fb5-dcf0-455c-97a7-5446766b0d01-kube-api-access-22zw7\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.753065 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-fernet-keys\") pod 
\"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.753086 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mggsf\" (UniqueName: \"kubernetes.io/projected/b77ab75f-32f2-4664-a48e-76699f609a7b-kube-api-access-mggsf\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.753103 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fjxm\" (UniqueName: \"kubernetes.io/projected/e85b05a0-5d74-4df9-b09c-a68596f45b6e-kube-api-access-5fjxm\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.753123 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b77ab75f-32f2-4664-a48e-76699f609a7b-logs\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.759043 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.765116 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-scripts\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.765801 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-679bb9cf9-plnhs"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.766448 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/930a6fb5-dcf0-455c-97a7-5446766b0d01-logs\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.778843 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-combined-ca-bundle\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.779346 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-public-tls-certs\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.782935 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22zw7\" (UniqueName: \"kubernetes.io/projected/930a6fb5-dcf0-455c-97a7-5446766b0d01-kube-api-access-22zw7\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " 
pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.783271 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-fernet-keys\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.788760 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-69528"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.788992 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f66db59b9-69528" podUID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" containerName="dnsmasq-dns" containerID="cri-o://ba5c95998aee863daad136e4adbdff3a75bff32e0bb79706ddc25d0ddbaa6aa7" gracePeriod=10 Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.802697 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-internal-tls-certs\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.818114 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-869f779d85-s5jgk"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.819554 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.823362 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.823563 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-credential-keys\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.823603 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e85b05a0-5d74-4df9-b09c-a68596f45b6e-config-data\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.823992 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-config-data\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.824389 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-combined-ca-bundle\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.824411 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/930a6fb5-dcf0-455c-97a7-5446766b0d01-config-data-custom\") pod \"barbican-keystone-listener-f86f47df6-cktpw\" (UID: \"930a6fb5-dcf0-455c-97a7-5446766b0d01\") " pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.830425 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fjxm\" (UniqueName: \"kubernetes.io/projected/e85b05a0-5d74-4df9-b09c-a68596f45b6e-kube-api-access-5fjxm\") pod \"keystone-68c57f7894-dq5jz\" (UID: \"e85b05a0-5d74-4df9-b09c-a68596f45b6e\") " pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.896674 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-config-data-custom\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.896828 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-dns-svc\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.896915 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-combined-ca-bundle\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.897911 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-config-data\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.897956 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-config\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.898021 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g7xr\" (UniqueName: \"kubernetes.io/projected/391a1333-2eca-4967-9d3f-713c564c0bfe-kube-api-access-9g7xr\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.898142 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mggsf\" (UniqueName: \"kubernetes.io/projected/b77ab75f-32f2-4664-a48e-76699f609a7b-kube-api-access-mggsf\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.898196 5045 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b77ab75f-32f2-4664-a48e-76699f609a7b-logs\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.898838 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-sb\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.899031 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-nb\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.900542 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b77ab75f-32f2-4664-a48e-76699f609a7b-logs\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.906384 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-s5jgk"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.914751 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-combined-ca-bundle\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.925022 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.931935 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-config-data\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.939038 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mggsf\" (UniqueName: \"kubernetes.io/projected/b77ab75f-32f2-4664-a48e-76699f609a7b-kube-api-access-mggsf\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.943973 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.944204 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b77ab75f-32f2-4664-a48e-76699f609a7b-config-data-custom\") pod \"barbican-worker-679bb9cf9-plnhs\" (UID: \"b77ab75f-32f2-4664-a48e-76699f609a7b\") " pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.968506 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5f7f57bf4-26bpl"] Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.969781 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.972272 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 23:18:22 crc kubenswrapper[5045]: I1125 23:18:22.982026 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-679bb9cf9-plnhs" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.000539 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-nb\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.000622 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-dns-svc\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.000675 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-config\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.000697 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g7xr\" (UniqueName: \"kubernetes.io/projected/391a1333-2eca-4967-9d3f-713c564c0bfe-kube-api-access-9g7xr\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.000764 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-sb\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.001743 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-sb\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.002400 5045 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5f7f57bf4-26bpl"] Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.002430 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-nb\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.003069 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-config\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.005100 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-dns-svc\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.020637 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g7xr\" (UniqueName: \"kubernetes.io/projected/391a1333-2eca-4967-9d3f-713c564c0bfe-kube-api-access-9g7xr\") pod \"dnsmasq-dns-869f779d85-s5jgk\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.115551 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-logs\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.115667 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-combined-ca-bundle\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.115704 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data-custom\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.115733 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxwpk\" (UniqueName: \"kubernetes.io/projected/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-kube-api-access-xxwpk\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.115766 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: 
\"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.174004 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5db8cdc695-2sz2g" event={"ID":"6970371c-e072-49ea-97b5-a6bed28d5372","Type":"ContainerStarted","Data":"a64094aa4b17626eb2cc8376410097b12a1fad3fc55e4156f7a124bbf12499a8"} Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.174335 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5db8cdc695-2sz2g" event={"ID":"6970371c-e072-49ea-97b5-a6bed28d5372","Type":"ContainerStarted","Data":"eb12028477f8c39598132fb006c99e35100410b9947c43949b77b2045d5f38fe"} Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.174701 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.190442 5045 generic.go:334] "Generic (PLEG): container finished" podID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" containerID="ba5c95998aee863daad136e4adbdff3a75bff32e0bb79706ddc25d0ddbaa6aa7" exitCode=0 Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.190817 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-69528" event={"ID":"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74","Type":"ContainerDied","Data":"ba5c95998aee863daad136e4adbdff3a75bff32e0bb79706ddc25d0ddbaa6aa7"} Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.220083 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-logs\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.220397 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-combined-ca-bundle\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.220576 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data-custom\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.220685 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxwpk\" (UniqueName: \"kubernetes.io/projected/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-kube-api-access-xxwpk\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.220833 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.222078 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-logs\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.224153 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5db8cdc695-2sz2g" podStartSLOduration=6.224140723 podStartE2EDuration="6.224140723s" podCreationTimestamp="2025-11-25 23:18:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:23.201809841 +0000 UTC m=+1159.559468943" watchObservedRunningTime="2025-11-25 23:18:23.224140723 +0000 UTC m=+1159.581799835" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.230241 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data-custom\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.230654 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-combined-ca-bundle\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.235315 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.250897 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxwpk\" (UniqueName: \"kubernetes.io/projected/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-kube-api-access-xxwpk\") pod \"barbican-api-5f7f57bf4-26bpl\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.307900 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:23 crc kubenswrapper[5045]: I1125 23:18:23.329871 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.409351 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.531448 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-dns-svc\") pod \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.531638 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-sb\") pod \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.531700 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-nb\") pod \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.531835 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-config\") pod \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.531903 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fmqv\" (UniqueName: \"kubernetes.io/projected/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-kube-api-access-4fmqv\") pod \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\" (UID: \"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74\") " Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.544965 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-kube-api-access-4fmqv" (OuterVolumeSpecName: "kube-api-access-4fmqv") pod "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" (UID: "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74"). InnerVolumeSpecName "kube-api-access-4fmqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:25 crc kubenswrapper[5045]: W1125 23:18:23.614609 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod930a6fb5_dcf0_455c_97a7_5446766b0d01.slice/crio-d059e89f1a81513e4bbf58b2099d548e0e1d11a61cff7abb136fcbad9ef6272f WatchSource:0}: Error finding container d059e89f1a81513e4bbf58b2099d548e0e1d11a61cff7abb136fcbad9ef6272f: Status 404 returned error can't find the container with id d059e89f1a81513e4bbf58b2099d548e0e1d11a61cff7abb136fcbad9ef6272f Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.615215 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-config" (OuterVolumeSpecName: "config") pod "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" (UID: "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.624247 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" (UID: "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.626049 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" (UID: "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.627830 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" (UID: "2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.634368 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-f86f47df6-cktpw"] Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.635339 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.635356 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fmqv\" (UniqueName: \"kubernetes.io/projected/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-kube-api-access-4fmqv\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.635367 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.635376 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:23.635384 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:24.199394 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" event={"ID":"930a6fb5-dcf0-455c-97a7-5446766b0d01","Type":"ContainerStarted","Data":"d059e89f1a81513e4bbf58b2099d548e0e1d11a61cff7abb136fcbad9ef6272f"} Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:24.203589 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f66db59b9-69528" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:24.203932 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-69528" event={"ID":"2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74","Type":"ContainerDied","Data":"5133d587603829532ef62ecd41e668f8d5f7c63565b38cc625e256fa5a82ab28"} Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:24.203980 5045 scope.go:117] "RemoveContainer" containerID="ba5c95998aee863daad136e4adbdff3a75bff32e0bb79706ddc25d0ddbaa6aa7" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:24.237465 5045 scope.go:117] "RemoveContainer" containerID="b1e0e21681339bc3ebf86fa757fdc3aeaf79beb20895a929d78e5fedf25ec126" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:24.237755 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-69528"] Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:24.245217 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-69528"] Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:24.421953 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" path="/var/lib/kubelet/pods/2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74/volumes" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.424329 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6477c784d-pjbws"] Nov 25 23:18:25 crc kubenswrapper[5045]: E1125 23:18:25.424855 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" containerName="init" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.424866 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" containerName="init" Nov 25 23:18:25 crc kubenswrapper[5045]: E1125 23:18:25.424894 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" containerName="dnsmasq-dns" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.424901 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" containerName="dnsmasq-dns" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.425081 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b4a1c6b-ec7c-49bc-b4dd-617c4caf2b74" containerName="dnsmasq-dns" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.426305 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.430880 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.431225 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.449472 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6477c784d-pjbws"] Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.573666 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-config-data\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.573836 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cf1306a-a479-4be9-9d81-a24e584294a5-logs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.573893 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qx29\" (UniqueName: \"kubernetes.io/projected/4cf1306a-a479-4be9-9d81-a24e584294a5-kube-api-access-5qx29\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.573930 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-config-data-custom\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.573970 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-internal-tls-certs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.575700 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-combined-ca-bundle\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.575801 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-public-tls-certs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.677282 5045 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-combined-ca-bundle\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.677383 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-public-tls-certs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.677408 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-config-data\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.678306 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cf1306a-a479-4be9-9d81-a24e584294a5-logs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.678346 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qx29\" (UniqueName: \"kubernetes.io/projected/4cf1306a-a479-4be9-9d81-a24e584294a5-kube-api-access-5qx29\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.678375 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-config-data-custom\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.678410 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-internal-tls-certs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.678667 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cf1306a-a479-4be9-9d81-a24e584294a5-logs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.689793 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-config-data\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.692878 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-public-tls-certs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.692936 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-config-data-custom\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.692975 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-combined-ca-bundle\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.693291 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf1306a-a479-4be9-9d81-a24e584294a5-internal-tls-certs\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.696567 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qx29\" (UniqueName: \"kubernetes.io/projected/4cf1306a-a479-4be9-9d81-a24e584294a5-kube-api-access-5qx29\") pod \"barbican-api-6477c784d-pjbws\" (UID: \"4cf1306a-a479-4be9-9d81-a24e584294a5\") " pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.752006 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.852990 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-68c57f7894-dq5jz"] Nov 25 23:18:25 crc kubenswrapper[5045]: I1125 23:18:25.898157 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-679bb9cf9-plnhs"] Nov 25 23:18:26 crc kubenswrapper[5045]: I1125 23:18:26.030687 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-s5jgk"] Nov 25 23:18:26 crc kubenswrapper[5045]: I1125 23:18:26.042833 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5f7f57bf4-26bpl"] Nov 25 23:18:26 crc kubenswrapper[5045]: I1125 23:18:26.229226 5045 generic.go:334] "Generic (PLEG): container finished" podID="889492c0-db80-43f6-9a4f-36292139f3df" containerID="ab401352773cd5b2d5ca78e559ec38054346c749e815f0733ab5e4d3b18524a8" exitCode=0 Nov 25 23:18:26 crc kubenswrapper[5045]: I1125 23:18:26.229502 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-n58vj" event={"ID":"889492c0-db80-43f6-9a4f-36292139f3df","Type":"ContainerDied","Data":"ab401352773cd5b2d5ca78e559ec38054346c749e815f0733ab5e4d3b18524a8"} Nov 25 23:18:26 crc kubenswrapper[5045]: I1125 23:18:26.231201 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-68c57f7894-dq5jz" event={"ID":"e85b05a0-5d74-4df9-b09c-a68596f45b6e","Type":"ContainerStarted","Data":"4d85eaee2b48dacaba870e9129806c47afac5cf25688e17f61c81492c366be22"} Nov 25 23:18:26 crc kubenswrapper[5045]: I1125 23:18:26.866441 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6477c784d-pjbws"] Nov 25 23:18:26 crc kubenswrapper[5045]: W1125 23:18:26.868855 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb77ab75f_32f2_4664_a48e_76699f609a7b.slice/crio-090c706afc0719a6ddb505a7231611dd2b73aeb72602afbd96a1fc3073f8dea1 WatchSource:0}: Error finding container 090c706afc0719a6ddb505a7231611dd2b73aeb72602afbd96a1fc3073f8dea1: Status 404 returned error can't find the container with id 090c706afc0719a6ddb505a7231611dd2b73aeb72602afbd96a1fc3073f8dea1 Nov 25 23:18:26 crc kubenswrapper[5045]: W1125 23:18:26.882771 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podecbd3988_6dd2_4f7c_b59e_ec1483e6fab1.slice/crio-8734754bdecdd981b9aa02aac9870c5bc90cec69758d88c733cc1e9bc08b0c62 WatchSource:0}: Error finding container 8734754bdecdd981b9aa02aac9870c5bc90cec69758d88c733cc1e9bc08b0c62: Status 404 returned error can't find the container with id 8734754bdecdd981b9aa02aac9870c5bc90cec69758d88c733cc1e9bc08b0c62 Nov 25 23:18:26 crc kubenswrapper[5045]: W1125 23:18:26.883872 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cf1306a_a479_4be9_9d81_a24e584294a5.slice/crio-9c1b73bfc86e43fa7a5fccc0e9e3a040701e124572c2ea1fad9533c758d20687 WatchSource:0}: Error finding container 9c1b73bfc86e43fa7a5fccc0e9e3a040701e124572c2ea1fad9533c758d20687: Status 404 returned error can't find the container with id 9c1b73bfc86e43fa7a5fccc0e9e3a040701e124572c2ea1fad9533c758d20687 Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.242031 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-68c57f7894-dq5jz" 
event={"ID":"e85b05a0-5d74-4df9-b09c-a68596f45b6e","Type":"ContainerStarted","Data":"fc098f8c930dc706a13a33cb3bf64688f22965a7740251d6d4c8fb34acad4640"} Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.242386 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.246805 5045 generic.go:334] "Generic (PLEG): container finished" podID="391a1333-2eca-4967-9d3f-713c564c0bfe" containerID="59e90f7f643b3d6f5b5e02f51e68b103d5e8636f11ddb89f51644eddfce1d4ec" exitCode=0 Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.246915 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" event={"ID":"391a1333-2eca-4967-9d3f-713c564c0bfe","Type":"ContainerDied","Data":"59e90f7f643b3d6f5b5e02f51e68b103d5e8636f11ddb89f51644eddfce1d4ec"} Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.247014 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" event={"ID":"391a1333-2eca-4967-9d3f-713c564c0bfe","Type":"ContainerStarted","Data":"ec606593ba8b37c3ff9afc5d1827f9588773670a284c6ff9f2ce374e50b18760"} Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.249239 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6477c784d-pjbws" event={"ID":"4cf1306a-a479-4be9-9d81-a24e584294a5","Type":"ContainerStarted","Data":"cde4fa3f2f7656a5b2f68bd05ecad12006b5fb1224a60c8589b9c2af1173faea"} Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.249302 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6477c784d-pjbws" event={"ID":"4cf1306a-a479-4be9-9d81-a24e584294a5","Type":"ContainerStarted","Data":"9c1b73bfc86e43fa7a5fccc0e9e3a040701e124572c2ea1fad9533c758d20687"} Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.253013 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-679bb9cf9-plnhs" event={"ID":"b77ab75f-32f2-4664-a48e-76699f609a7b","Type":"ContainerStarted","Data":"090c706afc0719a6ddb505a7231611dd2b73aeb72602afbd96a1fc3073f8dea1"} Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.254882 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f7f57bf4-26bpl" event={"ID":"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1","Type":"ContainerStarted","Data":"b4702f8cca3a5df39a7dbb659d4cdafcb2fc2233f3fbfc7de1a76ab48ebb448c"} Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.254946 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f7f57bf4-26bpl" event={"ID":"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1","Type":"ContainerStarted","Data":"8734754bdecdd981b9aa02aac9870c5bc90cec69758d88c733cc1e9bc08b0c62"} Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.291681 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-68c57f7894-dq5jz" podStartSLOduration=5.291661294 podStartE2EDuration="5.291661294s" podCreationTimestamp="2025-11-25 23:18:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:27.267281624 +0000 UTC m=+1163.624940736" watchObservedRunningTime="2025-11-25 23:18:27.291661294 +0000 UTC m=+1163.649320426" Nov 25 23:18:27 crc kubenswrapper[5045]: I1125 23:18:27.930497 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-n58vj" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.017257 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-combined-ca-bundle\") pod \"889492c0-db80-43f6-9a4f-36292139f3df\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.017334 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-config-data\") pod \"889492c0-db80-43f6-9a4f-36292139f3df\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.017453 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-scripts\") pod \"889492c0-db80-43f6-9a4f-36292139f3df\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.017562 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mhr7\" (UniqueName: \"kubernetes.io/projected/889492c0-db80-43f6-9a4f-36292139f3df-kube-api-access-5mhr7\") pod \"889492c0-db80-43f6-9a4f-36292139f3df\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.017666 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/889492c0-db80-43f6-9a4f-36292139f3df-logs\") pod \"889492c0-db80-43f6-9a4f-36292139f3df\" (UID: \"889492c0-db80-43f6-9a4f-36292139f3df\") " Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.018376 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/889492c0-db80-43f6-9a4f-36292139f3df-logs" (OuterVolumeSpecName: "logs") pod "889492c0-db80-43f6-9a4f-36292139f3df" (UID: "889492c0-db80-43f6-9a4f-36292139f3df"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.022843 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/889492c0-db80-43f6-9a4f-36292139f3df-kube-api-access-5mhr7" (OuterVolumeSpecName: "kube-api-access-5mhr7") pod "889492c0-db80-43f6-9a4f-36292139f3df" (UID: "889492c0-db80-43f6-9a4f-36292139f3df"). InnerVolumeSpecName "kube-api-access-5mhr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.023929 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-scripts" (OuterVolumeSpecName: "scripts") pod "889492c0-db80-43f6-9a4f-36292139f3df" (UID: "889492c0-db80-43f6-9a4f-36292139f3df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.069377 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "889492c0-db80-43f6-9a4f-36292139f3df" (UID: "889492c0-db80-43f6-9a4f-36292139f3df"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.078484 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-config-data" (OuterVolumeSpecName: "config-data") pod "889492c0-db80-43f6-9a4f-36292139f3df" (UID: "889492c0-db80-43f6-9a4f-36292139f3df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.122745 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mhr7\" (UniqueName: \"kubernetes.io/projected/889492c0-db80-43f6-9a4f-36292139f3df-kube-api-access-5mhr7\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.123101 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/889492c0-db80-43f6-9a4f-36292139f3df-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.123114 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.123129 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.123141 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889492c0-db80-43f6-9a4f-36292139f3df-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.274871 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" event={"ID":"391a1333-2eca-4967-9d3f-713c564c0bfe","Type":"ContainerStarted","Data":"142088eeab9eb9a65ddbc000fc03e76c306a501e3634df6ec644771acecdaf13"} Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.276154 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.289366 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-n58vj" event={"ID":"889492c0-db80-43f6-9a4f-36292139f3df","Type":"ContainerDied","Data":"b47fa67a8a311d6a93d29782358d0a9754ae3149831de254cb89b5db4e35b27e"} Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.289405 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b47fa67a8a311d6a93d29782358d0a9754ae3149831de254cb89b5db4e35b27e" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.289479 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-n58vj" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.299023 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" podStartSLOduration=6.2990088 podStartE2EDuration="6.2990088s" podCreationTimestamp="2025-11-25 23:18:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:28.297346612 +0000 UTC m=+1164.655005744" watchObservedRunningTime="2025-11-25 23:18:28.2990088 +0000 UTC m=+1164.656667912" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.314316 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6477c784d-pjbws" event={"ID":"4cf1306a-a479-4be9-9d81-a24e584294a5","Type":"ContainerStarted","Data":"a9a34c86eeeb043a839908d305cf9c66199981c1c6360e079d13849f3e69f48d"} Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.315808 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.315847 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6477c784d-pjbws" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.327187 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f7f57bf4-26bpl" event={"ID":"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1","Type":"ContainerStarted","Data":"0bea458b02e01977810082a9a475f6175616ce36495a63e84943a21da03f43d1"} Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.327695 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.328046 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.350802 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lr4xs" event={"ID":"483c4f92-701f-4dae-a00a-3a3d753d8c17","Type":"ContainerStarted","Data":"ba879f8c25bf12fc747c8faa3ae7c732f6332e397a9ebbaba25e190b478b0788"} Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.359758 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6477c784d-pjbws" podStartSLOduration=3.359739625 podStartE2EDuration="3.359739625s" podCreationTimestamp="2025-11-25 23:18:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:28.351247171 +0000 UTC m=+1164.708906283" watchObservedRunningTime="2025-11-25 23:18:28.359739625 +0000 UTC m=+1164.717398737" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.380613 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" event={"ID":"930a6fb5-dcf0-455c-97a7-5446766b0d01","Type":"ContainerStarted","Data":"9cafbf5d17e728a0fa86088f64ae35fe652484224a3c4da5f54e82b061f18871"} Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.380649 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" event={"ID":"930a6fb5-dcf0-455c-97a7-5446766b0d01","Type":"ContainerStarted","Data":"6ebf88c1ddcb52f7b15e26d62c22d2072f8cc8dc6649e9eb59bd2a7b73379a9f"} Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 
23:18:28.387819 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-65cddd5cf6-fpz78"] Nov 25 23:18:28 crc kubenswrapper[5045]: E1125 23:18:28.388146 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889492c0-db80-43f6-9a4f-36292139f3df" containerName="placement-db-sync" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.388163 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="889492c0-db80-43f6-9a4f-36292139f3df" containerName="placement-db-sync" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.388335 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="889492c0-db80-43f6-9a4f-36292139f3df" containerName="placement-db-sync" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.389233 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.395103 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5f7f57bf4-26bpl" podStartSLOduration=6.39503743 podStartE2EDuration="6.39503743s" podCreationTimestamp="2025-11-25 23:18:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:28.372862772 +0000 UTC m=+1164.730521874" watchObservedRunningTime="2025-11-25 23:18:28.39503743 +0000 UTC m=+1164.752696542" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.399810 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.402054 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.402887 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-b5hms" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.403121 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.403298 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.421029 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-65cddd5cf6-fpz78"] Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.423858 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-lr4xs" podStartSLOduration=2.410207053 podStartE2EDuration="40.423846717s" podCreationTimestamp="2025-11-25 23:17:48 +0000 UTC" firstStartedPulling="2025-11-25 23:17:48.987405609 +0000 UTC m=+1125.345064721" lastFinishedPulling="2025-11-25 23:18:27.001045273 +0000 UTC m=+1163.358704385" observedRunningTime="2025-11-25 23:18:28.41628871 +0000 UTC m=+1164.773947822" watchObservedRunningTime="2025-11-25 23:18:28.423846717 +0000 UTC m=+1164.781505819" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.446901 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-f86f47df6-cktpw" podStartSLOduration=3.088525456 podStartE2EDuration="6.446881049s" podCreationTimestamp="2025-11-25 23:18:22 +0000 UTC" firstStartedPulling="2025-11-25 23:18:23.62041079 +0000 UTC m=+1159.978069902" lastFinishedPulling="2025-11-25 23:18:26.978766373 +0000 UTC 
m=+1163.336425495" observedRunningTime="2025-11-25 23:18:28.435940515 +0000 UTC m=+1164.793599627" watchObservedRunningTime="2025-11-25 23:18:28.446881049 +0000 UTC m=+1164.804540161" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.543124 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-config-data\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.543202 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-scripts\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.543245 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-public-tls-certs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.543268 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb4nt\" (UniqueName: \"kubernetes.io/projected/3a86c07d-787b-4255-8861-e1c03bc78303-kube-api-access-sb4nt\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.543365 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-combined-ca-bundle\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.543488 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a86c07d-787b-4255-8861-e1c03bc78303-logs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.543543 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-internal-tls-certs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.645069 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-config-data\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.645136 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-scripts\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.645179 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-public-tls-certs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.645206 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb4nt\" (UniqueName: \"kubernetes.io/projected/3a86c07d-787b-4255-8861-e1c03bc78303-kube-api-access-sb4nt\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.645264 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-combined-ca-bundle\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.645332 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a86c07d-787b-4255-8861-e1c03bc78303-logs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.645384 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-internal-tls-certs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.646321 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a86c07d-787b-4255-8861-e1c03bc78303-logs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.648472 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-scripts\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.649499 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-combined-ca-bundle\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.649502 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-config-data\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " 
pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.649649 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-public-tls-certs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.652254 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a86c07d-787b-4255-8861-e1c03bc78303-internal-tls-certs\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.670179 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb4nt\" (UniqueName: \"kubernetes.io/projected/3a86c07d-787b-4255-8861-e1c03bc78303-kube-api-access-sb4nt\") pod \"placement-65cddd5cf6-fpz78\" (UID: \"3a86c07d-787b-4255-8861-e1c03bc78303\") " pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:28 crc kubenswrapper[5045]: I1125 23:18:28.716981 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:30 crc kubenswrapper[5045]: I1125 23:18:30.540700 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:18:30 crc kubenswrapper[5045]: I1125 23:18:30.541001 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:18:30 crc kubenswrapper[5045]: I1125 23:18:30.541044 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:18:30 crc kubenswrapper[5045]: I1125 23:18:30.541697 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ad71cc49c7fac24fcefe379082dd68e60c6857b81de28d84310c7d3a35f4b46a"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:18:30 crc kubenswrapper[5045]: I1125 23:18:30.541769 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://ad71cc49c7fac24fcefe379082dd68e60c6857b81de28d84310c7d3a35f4b46a" gracePeriod=600 Nov 25 23:18:31 crc kubenswrapper[5045]: I1125 23:18:31.408820 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="ad71cc49c7fac24fcefe379082dd68e60c6857b81de28d84310c7d3a35f4b46a" exitCode=0 Nov 25 23:18:31 crc kubenswrapper[5045]: I1125 23:18:31.409111 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"ad71cc49c7fac24fcefe379082dd68e60c6857b81de28d84310c7d3a35f4b46a"} Nov 25 23:18:31 crc kubenswrapper[5045]: I1125 23:18:31.409141 5045 scope.go:117] "RemoveContainer" containerID="ac9b25e6d63635c2d6571dedc7552214abf9eda1f3832a75a6240c6a0f672f51" Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.310842 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.376427 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-47bq9"] Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.377268 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" podUID="82d48403-8197-427a-863b-e61cf561bb37" containerName="dnsmasq-dns" containerID="cri-o://21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3" gracePeriod=10 Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.466057 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"a38112312cf3b5e81d6e73cbd55baa1467e1d3453486aaf8dbc83baabfa5d34a"} Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.469051 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-679bb9cf9-plnhs" event={"ID":"b77ab75f-32f2-4664-a48e-76699f609a7b","Type":"ContainerStarted","Data":"71de27355afd74cd956c14a7c73be9eb85bd6b00b66847c9e1d90fefd5bc97ca"} Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.484778 5045 generic.go:334] "Generic (PLEG): container finished" podID="483c4f92-701f-4dae-a00a-3a3d753d8c17" containerID="ba879f8c25bf12fc747c8faa3ae7c732f6332e397a9ebbaba25e190b478b0788" exitCode=0 Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.484994 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lr4xs" event={"ID":"483c4f92-701f-4dae-a00a-3a3d753d8c17","Type":"ContainerDied","Data":"ba879f8c25bf12fc747c8faa3ae7c732f6332e397a9ebbaba25e190b478b0788"} Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.515896 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="ceilometer-central-agent" containerID="cri-o://d7f1317ff1ae899968a4be7a67d042903b769b8e9101e48303f747617094e99d" gracePeriod=30 Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.516087 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.516516 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="sg-core" containerID="cri-o://6c90ba0300d881d80fc270ac5bd982676a4eb5a0535df1f5587476ba47223757" gracePeriod=30 Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.516762 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="proxy-httpd" containerID="cri-o://ebb1af0ccae336e80c7a28af9f1cbfabfbf9365cda1c3c3fd6bb1542dc9ed599" gracePeriod=30 Nov 25 23:18:33 crc 
kubenswrapper[5045]: I1125 23:18:33.516844 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="ceilometer-notification-agent" containerID="cri-o://bdec37632121d3535f3dfdb6b0610b5374b313598ecb6fcf11b0ddb7a5f880eb" gracePeriod=30 Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.577401 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-65cddd5cf6-fpz78"] Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.587303 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.071912486 podStartE2EDuration="45.587280795s" podCreationTimestamp="2025-11-25 23:17:48 +0000 UTC" firstStartedPulling="2025-11-25 23:17:49.559692614 +0000 UTC m=+1125.917351766" lastFinishedPulling="2025-11-25 23:18:33.075060923 +0000 UTC m=+1169.432720075" observedRunningTime="2025-11-25 23:18:33.55641063 +0000 UTC m=+1169.914069742" watchObservedRunningTime="2025-11-25 23:18:33.587280795 +0000 UTC m=+1169.944939907" Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.836238 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.961905 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjtvj\" (UniqueName: \"kubernetes.io/projected/82d48403-8197-427a-863b-e61cf561bb37-kube-api-access-hjtvj\") pod \"82d48403-8197-427a-863b-e61cf561bb37\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.962030 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-config\") pod \"82d48403-8197-427a-863b-e61cf561bb37\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.962108 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-sb\") pod \"82d48403-8197-427a-863b-e61cf561bb37\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.962189 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-dns-svc\") pod \"82d48403-8197-427a-863b-e61cf561bb37\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.962244 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-nb\") pod \"82d48403-8197-427a-863b-e61cf561bb37\" (UID: \"82d48403-8197-427a-863b-e61cf561bb37\") " Nov 25 23:18:33 crc kubenswrapper[5045]: I1125 23:18:33.967429 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82d48403-8197-427a-863b-e61cf561bb37-kube-api-access-hjtvj" (OuterVolumeSpecName: "kube-api-access-hjtvj") pod "82d48403-8197-427a-863b-e61cf561bb37" (UID: "82d48403-8197-427a-863b-e61cf561bb37"). InnerVolumeSpecName "kube-api-access-hjtvj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.024290 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-config" (OuterVolumeSpecName: "config") pod "82d48403-8197-427a-863b-e61cf561bb37" (UID: "82d48403-8197-427a-863b-e61cf561bb37"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.024613 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "82d48403-8197-427a-863b-e61cf561bb37" (UID: "82d48403-8197-427a-863b-e61cf561bb37"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.024628 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "82d48403-8197-427a-863b-e61cf561bb37" (UID: "82d48403-8197-427a-863b-e61cf561bb37"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.040505 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "82d48403-8197-427a-863b-e61cf561bb37" (UID: "82d48403-8197-427a-863b-e61cf561bb37"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.063772 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.064023 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.064039 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjtvj\" (UniqueName: \"kubernetes.io/projected/82d48403-8197-427a-863b-e61cf561bb37-kube-api-access-hjtvj\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.064050 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.064058 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82d48403-8197-427a-863b-e61cf561bb37-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.523920 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-679bb9cf9-plnhs" event={"ID":"b77ab75f-32f2-4664-a48e-76699f609a7b","Type":"ContainerStarted","Data":"28f43181ba6b9174cfaa62b7c8c1949cec8b520856c3b44dbd077fe6c8fb6297"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.525431 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/placement-65cddd5cf6-fpz78" event={"ID":"3a86c07d-787b-4255-8861-e1c03bc78303","Type":"ContainerStarted","Data":"abbd20f3bc253b5169733f107f05869c7a1148fdb3a78487d36c4227c19a8503"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.525470 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65cddd5cf6-fpz78" event={"ID":"3a86c07d-787b-4255-8861-e1c03bc78303","Type":"ContainerStarted","Data":"10d47f2ddf55e59d5e448ecf8783846e17afd726b237fbc68e820d667cf5fdcc"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.525483 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65cddd5cf6-fpz78" event={"ID":"3a86c07d-787b-4255-8861-e1c03bc78303","Type":"ContainerStarted","Data":"34631b7537377ca664015be709413c25569e5451973afd62ff4e2c8771247f5c"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.525553 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.527525 5045 generic.go:334] "Generic (PLEG): container finished" podID="d264a9e9-964e-4764-bb10-466754e4e77a" containerID="6c90ba0300d881d80fc270ac5bd982676a4eb5a0535df1f5587476ba47223757" exitCode=2 Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.527557 5045 generic.go:334] "Generic (PLEG): container finished" podID="d264a9e9-964e-4764-bb10-466754e4e77a" containerID="d7f1317ff1ae899968a4be7a67d042903b769b8e9101e48303f747617094e99d" exitCode=0 Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.527593 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerStarted","Data":"ebb1af0ccae336e80c7a28af9f1cbfabfbf9365cda1c3c3fd6bb1542dc9ed599"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.527624 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerDied","Data":"6c90ba0300d881d80fc270ac5bd982676a4eb5a0535df1f5587476ba47223757"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.527642 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerDied","Data":"d7f1317ff1ae899968a4be7a67d042903b769b8e9101e48303f747617094e99d"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.528923 5045 generic.go:334] "Generic (PLEG): container finished" podID="82d48403-8197-427a-863b-e61cf561bb37" containerID="21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3" exitCode=0 Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.528940 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.528963 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" event={"ID":"82d48403-8197-427a-863b-e61cf561bb37","Type":"ContainerDied","Data":"21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.529087 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" event={"ID":"82d48403-8197-427a-863b-e61cf561bb37","Type":"ContainerDied","Data":"a07c421f049fda9873cb2be2ae47e276a2e6a77d70d35e13a26550a19d2637d0"} Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.529108 5045 scope.go:117] "RemoveContainer" containerID="21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.554007 5045 scope.go:117] "RemoveContainer" containerID="abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.559497 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-679bb9cf9-plnhs" podStartSLOduration=6.400518345 podStartE2EDuration="12.559454943s" podCreationTimestamp="2025-11-25 23:18:22 +0000 UTC" firstStartedPulling="2025-11-25 23:18:26.870953245 +0000 UTC m=+1163.228612357" lastFinishedPulling="2025-11-25 23:18:33.029889813 +0000 UTC m=+1169.387548955" observedRunningTime="2025-11-25 23:18:34.543941853 +0000 UTC m=+1170.901600955" watchObservedRunningTime="2025-11-25 23:18:34.559454943 +0000 UTC m=+1170.917114055" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.579522 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-47bq9"] Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.605191 5045 scope.go:117] "RemoveContainer" containerID="21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3" Nov 25 23:18:34 crc kubenswrapper[5045]: E1125 23:18:34.606604 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3\": container with ID starting with 21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3 not found: ID does not exist" containerID="21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.606647 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3"} err="failed to get container status \"21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3\": rpc error: code = NotFound desc = could not find container \"21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3\": container with ID starting with 21db3166f2085838b8b44d91f5c21c6f374f6b0a22d88f84baf9eac0120a5cd3 not found: ID does not exist" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.606674 5045 scope.go:117] "RemoveContainer" containerID="abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.606823 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-47bq9"] Nov 25 23:18:34 crc kubenswrapper[5045]: E1125 23:18:34.607361 5045 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349\": container with ID starting with abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349 not found: ID does not exist" containerID="abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.607435 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349"} err="failed to get container status \"abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349\": rpc error: code = NotFound desc = could not find container \"abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349\": container with ID starting with abd1a15115a3d987092837c8cc5d0b4816050b031381d6b125fd330d96e3e349 not found: ID does not exist" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.614779 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-65cddd5cf6-fpz78" podStartSLOduration=6.614747967 podStartE2EDuration="6.614747967s" podCreationTimestamp="2025-11-25 23:18:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:34.594473019 +0000 UTC m=+1170.952132141" watchObservedRunningTime="2025-11-25 23:18:34.614747967 +0000 UTC m=+1170.972407079" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.916556 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:18:34 crc kubenswrapper[5045]: I1125 23:18:34.987409 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.079076 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-combined-ca-bundle\") pod \"483c4f92-701f-4dae-a00a-3a3d753d8c17\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.079174 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvslz\" (UniqueName: \"kubernetes.io/projected/483c4f92-701f-4dae-a00a-3a3d753d8c17-kube-api-access-zvslz\") pod \"483c4f92-701f-4dae-a00a-3a3d753d8c17\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.079256 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-db-sync-config-data\") pod \"483c4f92-701f-4dae-a00a-3a3d753d8c17\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.079282 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/483c4f92-701f-4dae-a00a-3a3d753d8c17-etc-machine-id\") pod \"483c4f92-701f-4dae-a00a-3a3d753d8c17\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.079301 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-config-data\") pod \"483c4f92-701f-4dae-a00a-3a3d753d8c17\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.079355 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-scripts\") pod \"483c4f92-701f-4dae-a00a-3a3d753d8c17\" (UID: \"483c4f92-701f-4dae-a00a-3a3d753d8c17\") " Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.079349 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/483c4f92-701f-4dae-a00a-3a3d753d8c17-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "483c4f92-701f-4dae-a00a-3a3d753d8c17" (UID: "483c4f92-701f-4dae-a00a-3a3d753d8c17"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.079694 5045 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/483c4f92-701f-4dae-a00a-3a3d753d8c17-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.086414 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/483c4f92-701f-4dae-a00a-3a3d753d8c17-kube-api-access-zvslz" (OuterVolumeSpecName: "kube-api-access-zvslz") pod "483c4f92-701f-4dae-a00a-3a3d753d8c17" (UID: "483c4f92-701f-4dae-a00a-3a3d753d8c17"). InnerVolumeSpecName "kube-api-access-zvslz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.088851 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-scripts" (OuterVolumeSpecName: "scripts") pod "483c4f92-701f-4dae-a00a-3a3d753d8c17" (UID: "483c4f92-701f-4dae-a00a-3a3d753d8c17"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.111591 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "483c4f92-701f-4dae-a00a-3a3d753d8c17" (UID: "483c4f92-701f-4dae-a00a-3a3d753d8c17"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.119033 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "483c4f92-701f-4dae-a00a-3a3d753d8c17" (UID: "483c4f92-701f-4dae-a00a-3a3d753d8c17"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.130148 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.140045 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-config-data" (OuterVolumeSpecName: "config-data") pod "483c4f92-701f-4dae-a00a-3a3d753d8c17" (UID: "483c4f92-701f-4dae-a00a-3a3d753d8c17"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.181067 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.181094 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.181105 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvslz\" (UniqueName: \"kubernetes.io/projected/483c4f92-701f-4dae-a00a-3a3d753d8c17-kube-api-access-zvslz\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.181116 5045 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.181124 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/483c4f92-701f-4dae-a00a-3a3d753d8c17-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.539963 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lr4xs" event={"ID":"483c4f92-701f-4dae-a00a-3a3d753d8c17","Type":"ContainerDied","Data":"6657ae27fc98f1c822c387706514d87f9bd2d2bc73b9b968a29d604d9a32d140"} Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.540945 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6657ae27fc98f1c822c387706514d87f9bd2d2bc73b9b968a29d604d9a32d140" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.541270 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-lr4xs" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.551593 5045 generic.go:334] "Generic (PLEG): container finished" podID="d264a9e9-964e-4764-bb10-466754e4e77a" containerID="bdec37632121d3535f3dfdb6b0610b5374b313598ecb6fcf11b0ddb7a5f880eb" exitCode=0 Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.551664 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerDied","Data":"bdec37632121d3535f3dfdb6b0610b5374b313598ecb6fcf11b0ddb7a5f880eb"} Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.553658 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.781041 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 23:18:35 crc kubenswrapper[5045]: E1125 23:18:35.781358 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="483c4f92-701f-4dae-a00a-3a3d753d8c17" containerName="cinder-db-sync" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.781374 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="483c4f92-701f-4dae-a00a-3a3d753d8c17" containerName="cinder-db-sync" Nov 25 23:18:35 crc kubenswrapper[5045]: E1125 23:18:35.781391 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82d48403-8197-427a-863b-e61cf561bb37" containerName="dnsmasq-dns" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.781398 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="82d48403-8197-427a-863b-e61cf561bb37" containerName="dnsmasq-dns" Nov 25 23:18:35 crc kubenswrapper[5045]: E1125 23:18:35.781415 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82d48403-8197-427a-863b-e61cf561bb37" containerName="init" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.781420 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="82d48403-8197-427a-863b-e61cf561bb37" containerName="init" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.781563 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="82d48403-8197-427a-863b-e61cf561bb37" containerName="dnsmasq-dns" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.781592 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="483c4f92-701f-4dae-a00a-3a3d753d8c17" containerName="cinder-db-sync" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.782401 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.784496 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.785884 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.786657 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7jh9q" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.805155 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.817845 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.877126 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-dnvz9"] Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.878897 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.893295 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.893408 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.893493 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.893527 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.893570 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzw7l\" (UniqueName: \"kubernetes.io/projected/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-kube-api-access-fzw7l\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.893593 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" 
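The records above trace the kubelet volume reconciler's two-step flow for cinder-scheduler-0: each volume is first registered via operationExecutor.VerifyControllerAttachedVolume, then mounted with MountVolume.SetUp once attachment is confirmed. A minimal Python sketch for pairing those two records per volume and measuring the gap between them when triaging slow pod startups from a log like this one; it assumes one journald record per line, and the regex and CLI shape are illustrative, not part of any kubelet tooling:

    import re
    import sys
    from datetime import datetime

    # Pull the klog time-of-day, the operation, and the volume's UniqueName out of
    # records like:
    #   ... I1125 23:18:35.893593 5045 reconciler_common.go:245]
    #   "operationExecutor.VerifyControllerAttachedVolume started for volume
    #   \"config-data\" (UniqueName: \"kubernetes.io/secret/...-config-data\") ..."
    RECORD = re.compile(
        r'I\d{4} (?P<t>\d{2}:\d{2}:\d{2}\.\d+) .*?'
        r'(?P<op>VerifyControllerAttachedVolume started|MountVolume\.SetUp succeeded)'
        r'.*?UniqueName: \\?"(?P<vol>[^"\\]+)\\?"'
    )

    def mount_latencies(path):
        """Yield (UniqueName, seconds from attach-verify start to SetUp success)."""
        started = {}
        for line in open(path, errors="replace"):
            m = RECORD.search(line)
            if not m:
                continue
            # Time-of-day only: fine for intra-day deltas like the ones in this log.
            ts = datetime.strptime(m["t"], "%H:%M:%S.%f")
            if m["op"].startswith("Verify"):
                started.setdefault(m["vol"], ts)
            elif m["vol"] in started:
                yield m["vol"], (ts - started.pop(m["vol"])).total_seconds()

    if __name__ == "__main__":
        for vol, secs in mount_latencies(sys.argv[1]):
            print(f"{secs:8.3f}s  {vol}")

Against the cinder-scheduler-0 records here, for example, the config-data volume goes from VerifyControllerAttachedVolume at 23:18:35.893593 to MountVolume.SetUp at 23:18:36.002670, roughly 0.109s.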
Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.930257 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-dnvz9"] Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994562 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994620 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-dns-svc\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994688 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994704 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-config\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994734 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994797 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994818 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994844 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kprsj\" (UniqueName: \"kubernetes.io/projected/bc3d9a68-5746-4875-b808-3b335918a6f4-kube-api-access-kprsj\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994865 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-sb\") pod 
\"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994892 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.994915 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzw7l\" (UniqueName: \"kubernetes.io/projected/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-kube-api-access-fzw7l\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:35 crc kubenswrapper[5045]: I1125 23:18:35.996023 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.002670 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.003707 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.036007 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzw7l\" (UniqueName: \"kubernetes.io/projected/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-kube-api-access-fzw7l\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.036076 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.039431 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.039901 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.041515 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.045916 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.054778 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.096265 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-config\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.096310 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.096380 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kprsj\" (UniqueName: \"kubernetes.io/projected/bc3d9a68-5746-4875-b808-3b335918a6f4-kube-api-access-kprsj\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.096402 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-sb\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.096435 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-dns-svc\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.097318 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-dns-svc\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.097609 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.098197 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-sb\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.100093 5045 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-config\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.106164 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.116960 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kprsj\" (UniqueName: \"kubernetes.io/projected/bc3d9a68-5746-4875-b808-3b335918a6f4-kube-api-access-kprsj\") pod \"dnsmasq-dns-58db5546cc-dnvz9\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") " pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.198663 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.198733 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9b9340dd-6c33-4750-9fdd-5c588805522a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.198775 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrj78\" (UniqueName: \"kubernetes.io/projected/9b9340dd-6c33-4750-9fdd-5c588805522a-kube-api-access-vrj78\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.198821 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-scripts\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.198853 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data-custom\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.198889 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.198915 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b9340dd-6c33-4750-9fdd-5c588805522a-logs\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.207082 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.300341 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.300879 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9b9340dd-6c33-4750-9fdd-5c588805522a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.300931 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrj78\" (UniqueName: \"kubernetes.io/projected/9b9340dd-6c33-4750-9fdd-5c588805522a-kube-api-access-vrj78\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.300983 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-scripts\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.301010 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data-custom\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.301049 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.301085 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b9340dd-6c33-4750-9fdd-5c588805522a-logs\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.301476 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9b9340dd-6c33-4750-9fdd-5c588805522a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.302270 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b9340dd-6c33-4750-9fdd-5c588805522a-logs\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0" Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.309167 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " 
pod="openstack/cinder-api-0"
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.309905 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-scripts\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0"
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.310456 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0"
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.311024 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data-custom\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0"
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.335187 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrj78\" (UniqueName: \"kubernetes.io/projected/9b9340dd-6c33-4750-9fdd-5c588805522a-kube-api-access-vrj78\") pod \"cinder-api-0\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " pod="openstack/cinder-api-0"
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.368866 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.413453 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82d48403-8197-427a-863b-e61cf561bb37" path="/var/lib/kubelet/pods/82d48403-8197-427a-863b-e61cf561bb37/volumes"
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.658190 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.770955 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-dnvz9"]
Nov 25 23:18:36 crc kubenswrapper[5045]: I1125 23:18:36.937102 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.577235 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc3d9a68-5746-4875-b808-3b335918a6f4" containerID="c3fa77edafac5664d10858f50251d823ff6cd27d7e7d23d5a270e66f6fe5fe29" exitCode=0
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.577382 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" event={"ID":"bc3d9a68-5746-4875-b808-3b335918a6f4","Type":"ContainerDied","Data":"c3fa77edafac5664d10858f50251d823ff6cd27d7e7d23d5a270e66f6fe5fe29"}
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.577597 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" event={"ID":"bc3d9a68-5746-4875-b808-3b335918a6f4","Type":"ContainerStarted","Data":"66724663fd3a52bbd8e950631f5ffaf38422441fffcf158e7ae869fa504bd199"}
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.583550 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d3c3ae6e-ef55-4064-88ed-e95ab657b84e","Type":"ContainerStarted","Data":"a0556a9cbe975e1286f8a198286f95af683c8094cae571ba3a1388c0d9709147"}
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.585210 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9b9340dd-6c33-4750-9fdd-5c588805522a","Type":"ContainerStarted","Data":"83ba3f00b3e5d5984fcc1d326f03f8ba24b12b7e8061af2872de997c386237c8"}
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.684990 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6477c784d-pjbws"
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.860516 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6477c784d-pjbws"
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.958888 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5f7f57bf4-26bpl"]
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.959099 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5f7f57bf4-26bpl" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api-log" containerID="cri-o://b4702f8cca3a5df39a7dbb659d4cdafcb2fc2233f3fbfc7de1a76ab48ebb448c" gracePeriod=30
Nov 25 23:18:37 crc kubenswrapper[5045]: I1125 23:18:37.959227 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5f7f57bf4-26bpl" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api" containerID="cri-o://0bea458b02e01977810082a9a475f6175616ce36495a63e84943a21da03f43d1" gracePeriod=30
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.031451 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.604435 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9b9340dd-6c33-4750-9fdd-5c588805522a","Type":"ContainerStarted","Data":"76e8df49f72be3137a2d869a5c038b69082782bfc66404c7cc4e441fed0d217a"}
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.605065 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9b9340dd-6c33-4750-9fdd-5c588805522a","Type":"ContainerStarted","Data":"e8315c17d0c389578cbc69011a4ac66311cc92d437dd36c918828abf7bf9b5a2"}
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.605091 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.604770 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerName="cinder-api-log" containerID="cri-o://e8315c17d0c389578cbc69011a4ac66311cc92d437dd36c918828abf7bf9b5a2" gracePeriod=30
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.605203 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerName="cinder-api" containerID="cri-o://76e8df49f72be3137a2d869a5c038b69082782bfc66404c7cc4e441fed0d217a" gracePeriod=30
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.632737 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.632704586 podStartE2EDuration="2.632704586s" podCreationTimestamp="2025-11-25 23:18:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:38.627006051 +0000 UTC m=+1174.984665173" watchObservedRunningTime="2025-11-25 23:18:38.632704586 +0000 UTC m=+1174.990363698"
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.634918 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" event={"ID":"bc3d9a68-5746-4875-b808-3b335918a6f4","Type":"ContainerStarted","Data":"f6e511838c41c01c7318b486d5c136d23c95fd41fc99487bfcebd1ca2d200c00"}
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.635863 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9"
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.642093 5045 generic.go:334] "Generic (PLEG): container finished" podID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerID="b4702f8cca3a5df39a7dbb659d4cdafcb2fc2233f3fbfc7de1a76ab48ebb448c" exitCode=143
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.642138 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f7f57bf4-26bpl" event={"ID":"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1","Type":"ContainerDied","Data":"b4702f8cca3a5df39a7dbb659d4cdafcb2fc2233f3fbfc7de1a76ab48ebb448c"}
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.644073 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d3c3ae6e-ef55-4064-88ed-e95ab657b84e","Type":"ContainerStarted","Data":"78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532"}
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.656468 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" podStartSLOduration=3.656457505 podStartE2EDuration="3.656457505s" podCreationTimestamp="2025-11-25 23:18:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:38.654117687 +0000 UTC m=+1175.011776799" watchObservedRunningTime="2025-11-25 23:18:38.656457505 +0000 UTC m=+1175.014116617"
Nov 25 23:18:38 crc kubenswrapper[5045]: I1125 23:18:38.716238 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5b6dbdb6f5-47bq9" podUID="82d48403-8197-427a-863b-e61cf561bb37" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: i/o timeout"
Nov 25 23:18:39 crc kubenswrapper[5045]: I1125 23:18:39.653337 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d3c3ae6e-ef55-4064-88ed-e95ab657b84e","Type":"ContainerStarted","Data":"5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c"}
Nov 25 23:18:39 crc kubenswrapper[5045]: I1125 23:18:39.656077 5045 generic.go:334] "Generic (PLEG): container finished" podID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerID="e8315c17d0c389578cbc69011a4ac66311cc92d437dd36c918828abf7bf9b5a2" exitCode=143
Nov 25 23:18:39 crc kubenswrapper[5045]: I1125 23:18:39.656155 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9b9340dd-6c33-4750-9fdd-5c588805522a","Type":"ContainerDied","Data":"e8315c17d0c389578cbc69011a4ac66311cc92d437dd36c918828abf7bf9b5a2"}
Nov 25 23:18:39 crc kubenswrapper[5045]: I1125 23:18:39.676275 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.922603521 podStartE2EDuration="4.676262094s" podCreationTimestamp="2025-11-25 23:18:35 +0000 UTC" firstStartedPulling="2025-11-25 23:18:36.663238392 +0000 UTC m=+1173.020897504" lastFinishedPulling="2025-11-25 23:18:37.416896965 +0000 UTC m=+1173.774556077" observedRunningTime="2025-11-25 23:18:39.670977471 +0000 UTC m=+1176.028636583" watchObservedRunningTime="2025-11-25 23:18:39.676262094 +0000 UTC m=+1176.033921206"
Nov 25 23:18:41 crc kubenswrapper[5045]: I1125 23:18:41.107254 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 25 23:18:41 crc kubenswrapper[5045]: I1125 23:18:41.450913 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5f7f57bf4-26bpl" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.146:9311/healthcheck\": read tcp 10.217.0.2:47252->10.217.0.146:9311: read: connection reset by peer"
Nov 25 23:18:41 crc kubenswrapper[5045]: I1125 23:18:41.451007 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5f7f57bf4-26bpl" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.146:9311/healthcheck\": read tcp 10.217.0.2:47238->10.217.0.146:9311: read: connection reset by peer"
Nov 25 23:18:41 crc kubenswrapper[5045]: I1125 23:18:41.690574 5045 generic.go:334] "Generic (PLEG): container finished" podID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerID="0bea458b02e01977810082a9a475f6175616ce36495a63e84943a21da03f43d1" exitCode=0
Nov 25 23:18:41 crc kubenswrapper[5045]: I1125 23:18:41.691738 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f7f57bf4-26bpl" event={"ID":"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1","Type":"ContainerDied","Data":"0bea458b02e01977810082a9a475f6175616ce36495a63e84943a21da03f43d1"}
Nov 25 23:18:41 crc kubenswrapper[5045]: I1125 23:18:41.925067 5045 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.028606 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-combined-ca-bundle\") pod \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.028672 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data-custom\") pod \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.028752 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxwpk\" (UniqueName: \"kubernetes.io/projected/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-kube-api-access-xxwpk\") pod \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.028925 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data\") pod \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.029002 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-logs\") pod \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\" (UID: \"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1\") " Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.030136 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-logs" (OuterVolumeSpecName: "logs") pod "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" (UID: "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.042770 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" (UID: "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.053938 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-kube-api-access-xxwpk" (OuterVolumeSpecName: "kube-api-access-xxwpk") pod "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" (UID: "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1"). InnerVolumeSpecName "kube-api-access-xxwpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.070352 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" (UID: "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.105338 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data" (OuterVolumeSpecName: "config-data") pod "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" (UID: "ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.130829 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.131655 5045 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.131769 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxwpk\" (UniqueName: \"kubernetes.io/projected/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-kube-api-access-xxwpk\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.131832 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.131883 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.703582 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f7f57bf4-26bpl" event={"ID":"ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1","Type":"ContainerDied","Data":"8734754bdecdd981b9aa02aac9870c5bc90cec69758d88c733cc1e9bc08b0c62"} Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.703636 5045 scope.go:117] "RemoveContainer" containerID="0bea458b02e01977810082a9a475f6175616ce36495a63e84943a21da03f43d1" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.703636 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5f7f57bf4-26bpl" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.731371 5045 scope.go:117] "RemoveContainer" containerID="b4702f8cca3a5df39a7dbb659d4cdafcb2fc2233f3fbfc7de1a76ab48ebb448c" Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.746510 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5f7f57bf4-26bpl"] Nov 25 23:18:42 crc kubenswrapper[5045]: I1125 23:18:42.755529 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5f7f57bf4-26bpl"] Nov 25 23:18:44 crc kubenswrapper[5045]: I1125 23:18:44.411358 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" path="/var/lib/kubelet/pods/ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1/volumes" Nov 25 23:18:45 crc kubenswrapper[5045]: I1125 23:18:45.526012 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.208697 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.282814 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-s5jgk"] Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.283107 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" podUID="391a1333-2eca-4967-9d3f-713c564c0bfe" containerName="dnsmasq-dns" containerID="cri-o://142088eeab9eb9a65ddbc000fc03e76c306a501e3634df6ec644771acecdaf13" gracePeriod=10 Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.313920 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.382879 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.748852 5045 generic.go:334] "Generic (PLEG): container finished" podID="391a1333-2eca-4967-9d3f-713c564c0bfe" containerID="142088eeab9eb9a65ddbc000fc03e76c306a501e3634df6ec644771acecdaf13" exitCode=0 Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.748938 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" event={"ID":"391a1333-2eca-4967-9d3f-713c564c0bfe","Type":"ContainerDied","Data":"142088eeab9eb9a65ddbc000fc03e76c306a501e3634df6ec644771acecdaf13"} Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.749034 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" event={"ID":"391a1333-2eca-4967-9d3f-713c564c0bfe","Type":"ContainerDied","Data":"ec606593ba8b37c3ff9afc5d1827f9588773670a284c6ff9f2ce374e50b18760"} Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.749049 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec606593ba8b37c3ff9afc5d1827f9588773670a284c6ff9f2ce374e50b18760" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.749122 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerName="cinder-scheduler" containerID="cri-o://78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532" gracePeriod=30 Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.749585 
5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerName="probe" containerID="cri-o://5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c" gracePeriod=30 Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.795311 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.820309 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g7xr\" (UniqueName: \"kubernetes.io/projected/391a1333-2eca-4967-9d3f-713c564c0bfe-kube-api-access-9g7xr\") pod \"391a1333-2eca-4967-9d3f-713c564c0bfe\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.820377 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-nb\") pod \"391a1333-2eca-4967-9d3f-713c564c0bfe\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.820438 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-config\") pod \"391a1333-2eca-4967-9d3f-713c564c0bfe\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.821381 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-sb\") pod \"391a1333-2eca-4967-9d3f-713c564c0bfe\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.821468 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-dns-svc\") pod \"391a1333-2eca-4967-9d3f-713c564c0bfe\" (UID: \"391a1333-2eca-4967-9d3f-713c564c0bfe\") " Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.826152 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/391a1333-2eca-4967-9d3f-713c564c0bfe-kube-api-access-9g7xr" (OuterVolumeSpecName: "kube-api-access-9g7xr") pod "391a1333-2eca-4967-9d3f-713c564c0bfe" (UID: "391a1333-2eca-4967-9d3f-713c564c0bfe"). InnerVolumeSpecName "kube-api-access-9g7xr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.896655 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "391a1333-2eca-4967-9d3f-713c564c0bfe" (UID: "391a1333-2eca-4967-9d3f-713c564c0bfe"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.897431 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "391a1333-2eca-4967-9d3f-713c564c0bfe" (UID: "391a1333-2eca-4967-9d3f-713c564c0bfe"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.912111 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "391a1333-2eca-4967-9d3f-713c564c0bfe" (UID: "391a1333-2eca-4967-9d3f-713c564c0bfe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.926599 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9g7xr\" (UniqueName: \"kubernetes.io/projected/391a1333-2eca-4967-9d3f-713c564c0bfe-kube-api-access-9g7xr\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.926871 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.926907 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.926945 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:46 crc kubenswrapper[5045]: I1125 23:18:46.936307 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-config" (OuterVolumeSpecName: "config") pod "391a1333-2eca-4967-9d3f-713c564c0bfe" (UID: "391a1333-2eca-4967-9d3f-713c564c0bfe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:47 crc kubenswrapper[5045]: I1125 23:18:47.028448 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/391a1333-2eca-4967-9d3f-713c564c0bfe-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:47 crc kubenswrapper[5045]: I1125 23:18:47.759819 5045 generic.go:334] "Generic (PLEG): container finished" podID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerID="5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c" exitCode=0 Nov 25 23:18:47 crc kubenswrapper[5045]: I1125 23:18:47.759865 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d3c3ae6e-ef55-4064-88ed-e95ab657b84e","Type":"ContainerDied","Data":"5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c"} Nov 25 23:18:47 crc kubenswrapper[5045]: I1125 23:18:47.759915 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-869f779d85-s5jgk" Nov 25 23:18:47 crc kubenswrapper[5045]: I1125 23:18:47.794242 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-s5jgk"] Nov 25 23:18:47 crc kubenswrapper[5045]: I1125 23:18:47.803705 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-s5jgk"] Nov 25 23:18:47 crc kubenswrapper[5045]: I1125 23:18:47.934741 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5db8cdc695-2sz2g" Nov 25 23:18:48 crc kubenswrapper[5045]: I1125 23:18:48.021203 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-64bf94568b-qzrfh"] Nov 25 23:18:48 crc kubenswrapper[5045]: I1125 23:18:48.021499 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-64bf94568b-qzrfh" podUID="55785523-a194-47c5-ad63-5955cc73241e" containerName="neutron-httpd" containerID="cri-o://a09aea8b0c2a3c94c914667901dfd3ee6b1d501d9295f8c98796f23ab77d197d" gracePeriod=30 Nov 25 23:18:48 crc kubenswrapper[5045]: I1125 23:18:48.021955 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-64bf94568b-qzrfh" podUID="55785523-a194-47c5-ad63-5955cc73241e" containerName="neutron-api" containerID="cri-o://6bf2d5e4a5c4e921dddf93c952627bd84c8581f90c30f336ea367eca29affe72" gracePeriod=30 Nov 25 23:18:48 crc kubenswrapper[5045]: I1125 23:18:48.407962 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="391a1333-2eca-4967-9d3f-713c564c0bfe" path="/var/lib/kubelet/pods/391a1333-2eca-4967-9d3f-713c564c0bfe/volumes" Nov 25 23:18:48 crc kubenswrapper[5045]: I1125 23:18:48.422612 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 23:18:48 crc kubenswrapper[5045]: I1125 23:18:48.730931 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 23:18:48 crc kubenswrapper[5045]: I1125 23:18:48.769870 5045 generic.go:334] "Generic (PLEG): container finished" podID="55785523-a194-47c5-ad63-5955cc73241e" containerID="a09aea8b0c2a3c94c914667901dfd3ee6b1d501d9295f8c98796f23ab77d197d" exitCode=0 Nov 25 23:18:48 crc kubenswrapper[5045]: I1125 23:18:48.769930 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bf94568b-qzrfh" event={"ID":"55785523-a194-47c5-ad63-5955cc73241e","Type":"ContainerDied","Data":"a09aea8b0c2a3c94c914667901dfd3ee6b1d501d9295f8c98796f23ab77d197d"} Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.351069 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.472263 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-combined-ca-bundle\") pod \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.472357 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-scripts\") pod \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.472378 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzw7l\" (UniqueName: \"kubernetes.io/projected/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-kube-api-access-fzw7l\") pod \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.472396 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data\") pod \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.472449 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-etc-machine-id\") pod \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.472467 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data-custom\") pod \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\" (UID: \"d3c3ae6e-ef55-4064-88ed-e95ab657b84e\") " Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.473171 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d3c3ae6e-ef55-4064-88ed-e95ab657b84e" (UID: "d3c3ae6e-ef55-4064-88ed-e95ab657b84e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.477202 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-scripts" (OuterVolumeSpecName: "scripts") pod "d3c3ae6e-ef55-4064-88ed-e95ab657b84e" (UID: "d3c3ae6e-ef55-4064-88ed-e95ab657b84e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.478927 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d3c3ae6e-ef55-4064-88ed-e95ab657b84e" (UID: "d3c3ae6e-ef55-4064-88ed-e95ab657b84e"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.490964 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-kube-api-access-fzw7l" (OuterVolumeSpecName: "kube-api-access-fzw7l") pod "d3c3ae6e-ef55-4064-88ed-e95ab657b84e" (UID: "d3c3ae6e-ef55-4064-88ed-e95ab657b84e"). InnerVolumeSpecName "kube-api-access-fzw7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.512532 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3c3ae6e-ef55-4064-88ed-e95ab657b84e" (UID: "d3c3ae6e-ef55-4064-88ed-e95ab657b84e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.573831 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzw7l\" (UniqueName: \"kubernetes.io/projected/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-kube-api-access-fzw7l\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.573857 5045 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.573865 5045 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.573887 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.573898 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.583860 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data" (OuterVolumeSpecName: "config-data") pod "d3c3ae6e-ef55-4064-88ed-e95ab657b84e" (UID: "d3c3ae6e-ef55-4064-88ed-e95ab657b84e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.675775 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3c3ae6e-ef55-4064-88ed-e95ab657b84e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.787501 5045 generic.go:334] "Generic (PLEG): container finished" podID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerID="78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532" exitCode=0 Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.787548 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d3c3ae6e-ef55-4064-88ed-e95ab657b84e","Type":"ContainerDied","Data":"78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532"} Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.787574 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d3c3ae6e-ef55-4064-88ed-e95ab657b84e","Type":"ContainerDied","Data":"a0556a9cbe975e1286f8a198286f95af683c8094cae571ba3a1388c0d9709147"} Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.787599 5045 scope.go:117] "RemoveContainer" containerID="5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.787906 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.816461 5045 scope.go:117] "RemoveContainer" containerID="78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.825191 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.838678 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.846571 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 23:18:49 crc kubenswrapper[5045]: E1125 23:18:49.846974 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="391a1333-2eca-4967-9d3f-713c564c0bfe" containerName="dnsmasq-dns" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.846990 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="391a1333-2eca-4967-9d3f-713c564c0bfe" containerName="dnsmasq-dns" Nov 25 23:18:49 crc kubenswrapper[5045]: E1125 23:18:49.847006 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerName="cinder-scheduler" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847012 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerName="cinder-scheduler" Nov 25 23:18:49 crc kubenswrapper[5045]: E1125 23:18:49.847025 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerName="probe" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847031 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerName="probe" Nov 25 23:18:49 crc kubenswrapper[5045]: E1125 23:18:49.847043 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="391a1333-2eca-4967-9d3f-713c564c0bfe" containerName="init" Nov 25 23:18:49 crc 
kubenswrapper[5045]: I1125 23:18:49.847049 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="391a1333-2eca-4967-9d3f-713c564c0bfe" containerName="init" Nov 25 23:18:49 crc kubenswrapper[5045]: E1125 23:18:49.847058 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847063 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api" Nov 25 23:18:49 crc kubenswrapper[5045]: E1125 23:18:49.847077 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api-log" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847083 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api-log" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847249 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerName="cinder-scheduler" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847262 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api-log" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847276 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" containerName="probe" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847284 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="391a1333-2eca-4967-9d3f-713c564c0bfe" containerName="dnsmasq-dns" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.847295 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecbd3988-6dd2-4f7c-b59e-ec1483e6fab1" containerName="barbican-api" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.848159 5045 util.go:30] "No sandbox for pod can be found. 
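[annotation] The RemoveStaleState entries above show the CPU and memory managers dropping checkpointed per-container state for pod UIDs that were just deleted, triggered by the SyncLoop ADD of the replacement cinder-scheduler-0. The CPU manager keeps that state in a checkpoint file under /var/lib/kubelet; a minimal sketch that dumps it, decoding the JSON generically rather than with the kubelet's internal types (the exact schema varies by policy and is an assumption here):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

func main() {
	// Default checkpoint location used by the kubelet's CPU manager.
	raw, err := os.ReadFile("/var/lib/kubelet/cpu_manager_state")
	if err != nil {
		log.Fatal(err)
	}
	var state map[string]interface{}
	if err := json.Unmarshal(raw, &state); err != nil {
		log.Fatal(err)
	}
	// With the "none" policy there are no exclusive CPU assignments;
	// RemoveStaleState still clears bookkeeping for deleted pod UIDs.
	for k, v := range state {
		fmt.Printf("%s: %v\n", k, v)
	}
}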
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.848229 5045 scope.go:117] "RemoveContainer" containerID="5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c" Nov 25 23:18:49 crc kubenswrapper[5045]: E1125 23:18:49.849087 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c\": container with ID starting with 5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c not found: ID does not exist" containerID="5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.849170 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c"} err="failed to get container status \"5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c\": rpc error: code = NotFound desc = could not find container \"5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c\": container with ID starting with 5969eb7584a62208f41308cb204ec05ab4c120a7619d1dcde95783f03853c06c not found: ID does not exist" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.849228 5045 scope.go:117] "RemoveContainer" containerID="78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532" Nov 25 23:18:49 crc kubenswrapper[5045]: E1125 23:18:49.849416 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532\": container with ID starting with 78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532 not found: ID does not exist" containerID="78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.849446 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532"} err="failed to get container status \"78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532\": rpc error: code = NotFound desc = could not find container \"78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532\": container with ID starting with 78efff912c37c6784e2dd4523106569c57f4a9caa7a5e9a6a43d1aa2fb548532 not found: ID does not exist" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.854097 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.860729 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.882724 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.882794 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-config-data-custom\") pod 
\"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.882891 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-scripts\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.883792 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/efac58ce-5053-4a60-bcd4-41b7c1f483f2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.883819 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq5lc\" (UniqueName: \"kubernetes.io/projected/efac58ce-5053-4a60-bcd4-41b7c1f483f2-kube-api-access-qq5lc\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.883846 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-config-data\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.985023 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.985098 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.985148 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-scripts\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.985200 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/efac58ce-5053-4a60-bcd4-41b7c1f483f2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.985218 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq5lc\" (UniqueName: \"kubernetes.io/projected/efac58ce-5053-4a60-bcd4-41b7c1f483f2-kube-api-access-qq5lc\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 
23:18:49.985236 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-config-data\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.988812 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/efac58ce-5053-4a60-bcd4-41b7c1f483f2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.989344 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.990279 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-scripts\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.991979 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:49 crc kubenswrapper[5045]: I1125 23:18:49.993569 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efac58ce-5053-4a60-bcd4-41b7c1f483f2-config-data\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:50 crc kubenswrapper[5045]: I1125 23:18:50.004765 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq5lc\" (UniqueName: \"kubernetes.io/projected/efac58ce-5053-4a60-bcd4-41b7c1f483f2-kube-api-access-qq5lc\") pod \"cinder-scheduler-0\" (UID: \"efac58ce-5053-4a60-bcd4-41b7c1f483f2\") " pod="openstack/cinder-scheduler-0" Nov 25 23:18:50 crc kubenswrapper[5045]: I1125 23:18:50.171106 5045 util.go:30] "No sandbox for pod can be found. 
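[annotation] The "ContainerStatus from runtime service failed ... NotFound" errors above are benign: the kubelet asked CRI-O about containers it had already removed, and a NotFound status means the removal is simply complete. A sketch of the idiomatic gRPC status check for that case (the container ID is shortened here for illustration):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isContainerNotFound reports whether a CRI call failed only because the
// container is already gone, as in the DeleteContainer errors above.
func isContainerNotFound(err error) bool {
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.NotFound
}

func main() {
	err := status.Error(codes.NotFound, `could not find container "5969eb75..."`)
	fmt.Println(isContainerNotFound(err)) // true: safe to treat removal as done
}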
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 23:18:50 crc kubenswrapper[5045]: I1125 23:18:50.410186 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3c3ae6e-ef55-4064-88ed-e95ab657b84e" path="/var/lib/kubelet/pods/d3c3ae6e-ef55-4064-88ed-e95ab657b84e/volumes" Nov 25 23:18:50 crc kubenswrapper[5045]: I1125 23:18:50.660042 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 23:18:50 crc kubenswrapper[5045]: I1125 23:18:50.833073 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"efac58ce-5053-4a60-bcd4-41b7c1f483f2","Type":"ContainerStarted","Data":"2a84d926cbc9f9cba353d56175098ec6cb8f09a35bfbe7ee2c460a567429dc97"} Nov 25 23:18:51 crc kubenswrapper[5045]: I1125 23:18:51.843906 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"efac58ce-5053-4a60-bcd4-41b7c1f483f2","Type":"ContainerStarted","Data":"15f4619582cb145afaaa012dba4c3acad22c633a269a34f0a105c0345385f759"} Nov 25 23:18:51 crc kubenswrapper[5045]: I1125 23:18:51.844459 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"efac58ce-5053-4a60-bcd4-41b7c1f483f2","Type":"ContainerStarted","Data":"67fb18206d086e20c6eedebe514cc9e18160fa4e2b31222c09269f3639dbfe95"} Nov 25 23:18:51 crc kubenswrapper[5045]: I1125 23:18:51.875970 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.875945774 podStartE2EDuration="2.875945774s" podCreationTimestamp="2025-11-25 23:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:18:51.870673391 +0000 UTC m=+1188.228332503" watchObservedRunningTime="2025-11-25 23:18:51.875945774 +0000 UTC m=+1188.233604886" Nov 25 23:18:54 crc kubenswrapper[5045]: I1125 23:18:54.467873 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-68c57f7894-dq5jz" Nov 25 23:18:54 crc kubenswrapper[5045]: I1125 23:18:54.875402 5045 generic.go:334] "Generic (PLEG): container finished" podID="55785523-a194-47c5-ad63-5955cc73241e" containerID="6bf2d5e4a5c4e921dddf93c952627bd84c8581f90c30f336ea367eca29affe72" exitCode=0 Nov 25 23:18:54 crc kubenswrapper[5045]: I1125 23:18:54.875530 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bf94568b-qzrfh" event={"ID":"55785523-a194-47c5-ad63-5955cc73241e","Type":"ContainerDied","Data":"6bf2d5e4a5c4e921dddf93c952627bd84c8581f90c30f336ea367eca29affe72"} Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.171225 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.457022 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.495051 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-ovndb-tls-certs\") pod \"55785523-a194-47c5-ad63-5955cc73241e\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.495112 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-combined-ca-bundle\") pod \"55785523-a194-47c5-ad63-5955cc73241e\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.495150 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwfqw\" (UniqueName: \"kubernetes.io/projected/55785523-a194-47c5-ad63-5955cc73241e-kube-api-access-mwfqw\") pod \"55785523-a194-47c5-ad63-5955cc73241e\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.495275 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-config\") pod \"55785523-a194-47c5-ad63-5955cc73241e\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.495370 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-httpd-config\") pod \"55785523-a194-47c5-ad63-5955cc73241e\" (UID: \"55785523-a194-47c5-ad63-5955cc73241e\") " Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.500648 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "55785523-a194-47c5-ad63-5955cc73241e" (UID: "55785523-a194-47c5-ad63-5955cc73241e"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.523876 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55785523-a194-47c5-ad63-5955cc73241e-kube-api-access-mwfqw" (OuterVolumeSpecName: "kube-api-access-mwfqw") pod "55785523-a194-47c5-ad63-5955cc73241e" (UID: "55785523-a194-47c5-ad63-5955cc73241e"). InnerVolumeSpecName "kube-api-access-mwfqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.549181 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-config" (OuterVolumeSpecName: "config") pod "55785523-a194-47c5-ad63-5955cc73241e" (UID: "55785523-a194-47c5-ad63-5955cc73241e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.558266 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55785523-a194-47c5-ad63-5955cc73241e" (UID: "55785523-a194-47c5-ad63-5955cc73241e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.587744 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "55785523-a194-47c5-ad63-5955cc73241e" (UID: "55785523-a194-47c5-ad63-5955cc73241e"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.607657 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.607748 5045 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.607761 5045 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.607774 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55785523-a194-47c5-ad63-5955cc73241e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.607788 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwfqw\" (UniqueName: \"kubernetes.io/projected/55785523-a194-47c5-ad63-5955cc73241e-kube-api-access-mwfqw\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.888907 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bf94568b-qzrfh" event={"ID":"55785523-a194-47c5-ad63-5955cc73241e","Type":"ContainerDied","Data":"54398baf8d6207d0d1d80fc3d8a177563d05f00ff5a3c1891363a1c874539125"} Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.888959 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64bf94568b-qzrfh" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.889006 5045 scope.go:117] "RemoveContainer" containerID="a09aea8b0c2a3c94c914667901dfd3ee6b1d501d9295f8c98796f23ab77d197d" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.927026 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-64bf94568b-qzrfh"] Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.929422 5045 scope.go:117] "RemoveContainer" containerID="6bf2d5e4a5c4e921dddf93c952627bd84c8581f90c30f336ea367eca29affe72" Nov 25 23:18:55 crc kubenswrapper[5045]: I1125 23:18:55.936657 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-64bf94568b-qzrfh"] Nov 25 23:18:56 crc kubenswrapper[5045]: I1125 23:18:56.432240 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55785523-a194-47c5-ad63-5955cc73241e" path="/var/lib/kubelet/pods/55785523-a194-47c5-ad63-5955cc73241e/volumes" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.379049 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 23:18:58 crc kubenswrapper[5045]: E1125 23:18:58.379737 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55785523-a194-47c5-ad63-5955cc73241e" containerName="neutron-httpd" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.379757 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="55785523-a194-47c5-ad63-5955cc73241e" containerName="neutron-httpd" Nov 25 23:18:58 crc kubenswrapper[5045]: E1125 23:18:58.379775 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55785523-a194-47c5-ad63-5955cc73241e" containerName="neutron-api" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.379783 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="55785523-a194-47c5-ad63-5955cc73241e" containerName="neutron-api" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.379985 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="55785523-a194-47c5-ad63-5955cc73241e" containerName="neutron-api" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.380010 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="55785523-a194-47c5-ad63-5955cc73241e" containerName="neutron-httpd" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.380567 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.382954 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.384419 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.384786 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-pvfb6" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.392290 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.459233 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config-secret\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.459466 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.459541 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.459646 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ctqn\" (UniqueName: \"kubernetes.io/projected/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-kube-api-access-4ctqn\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.561770 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.561853 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.561895 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ctqn\" (UniqueName: \"kubernetes.io/projected/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-kube-api-access-4ctqn\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.562025 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config-secret\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.562900 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.569446 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.584410 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config-secret\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.584495 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ctqn\" (UniqueName: \"kubernetes.io/projected/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-kube-api-access-4ctqn\") pod \"openstackclient\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.719928 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.759886 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.773316 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.798969 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.800355 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.808761 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.872739 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7909b434-b7ea-46af-8c4e-b5454df0ba0f-openstack-config\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.872814 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7909b434-b7ea-46af-8c4e-b5454df0ba0f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.872842 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7909b434-b7ea-46af-8c4e-b5454df0ba0f-openstack-config-secret\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.872886 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zvpg\" (UniqueName: \"kubernetes.io/projected/7909b434-b7ea-46af-8c4e-b5454df0ba0f-kube-api-access-6zvpg\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: E1125 23:18:58.892376 5045 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 25 23:18:58 crc kubenswrapper[5045]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c_0(cbdf6905a1d39e65caabbe17f5f838449c1bee922f771d30780b61555693388f): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"cbdf6905a1d39e65caabbe17f5f838449c1bee922f771d30780b61555693388f" Netns:"/var/run/netns/2142acff-b803-444e-92f9-9c86d1529d56" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=cbdf6905a1d39e65caabbe17f5f838449c1bee922f771d30780b61555693388f;K8S_POD_UID=a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c]: expected pod UID "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" but got "7909b434-b7ea-46af-8c4e-b5454df0ba0f" from Kube API Nov 25 23:18:58 crc kubenswrapper[5045]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 25 23:18:58 crc kubenswrapper[5045]: > Nov 25 23:18:58 crc kubenswrapper[5045]: E1125 23:18:58.892444 5045 kuberuntime_sandbox.go:72] 
"Failed to create sandbox for pod" err=< Nov 25 23:18:58 crc kubenswrapper[5045]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c_0(cbdf6905a1d39e65caabbe17f5f838449c1bee922f771d30780b61555693388f): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"cbdf6905a1d39e65caabbe17f5f838449c1bee922f771d30780b61555693388f" Netns:"/var/run/netns/2142acff-b803-444e-92f9-9c86d1529d56" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=cbdf6905a1d39e65caabbe17f5f838449c1bee922f771d30780b61555693388f;K8S_POD_UID=a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c]: expected pod UID "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" but got "7909b434-b7ea-46af-8c4e-b5454df0ba0f" from Kube API Nov 25 23:18:58 crc kubenswrapper[5045]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 25 23:18:58 crc kubenswrapper[5045]: > pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.911591 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.914858 5045 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" podUID="7909b434-b7ea-46af-8c4e-b5454df0ba0f" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.925611 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.974428 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7909b434-b7ea-46af-8c4e-b5454df0ba0f-openstack-config-secret\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.974542 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zvpg\" (UniqueName: \"kubernetes.io/projected/7909b434-b7ea-46af-8c4e-b5454df0ba0f-kube-api-access-6zvpg\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.974660 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7909b434-b7ea-46af-8c4e-b5454df0ba0f-openstack-config\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.974726 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7909b434-b7ea-46af-8c4e-b5454df0ba0f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.975768 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7909b434-b7ea-46af-8c4e-b5454df0ba0f-openstack-config\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.979170 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7909b434-b7ea-46af-8c4e-b5454df0ba0f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.979626 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7909b434-b7ea-46af-8c4e-b5454df0ba0f-openstack-config-secret\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:58 crc kubenswrapper[5045]: I1125 23:18:58.989678 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zvpg\" (UniqueName: \"kubernetes.io/projected/7909b434-b7ea-46af-8c4e-b5454df0ba0f-kube-api-access-6zvpg\") pod \"openstackclient\" (UID: \"7909b434-b7ea-46af-8c4e-b5454df0ba0f\") " pod="openstack/openstackclient" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.076426 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ctqn\" (UniqueName: \"kubernetes.io/projected/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-kube-api-access-4ctqn\") pod \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.076490 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" 
(UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config-secret\") pod \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.076604 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-combined-ca-bundle\") pod \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.076672 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config\") pod \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\" (UID: \"a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c\") " Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.077170 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" (UID: "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.080069 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-kube-api-access-4ctqn" (OuterVolumeSpecName: "kube-api-access-4ctqn") pod "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" (UID: "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c"). InnerVolumeSpecName "kube-api-access-4ctqn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.080525 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" (UID: "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.080691 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" (UID: "a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.155606 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.178374 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.178406 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.178418 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ctqn\" (UniqueName: \"kubernetes.io/projected/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-kube-api-access-4ctqn\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.178428 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.633706 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 23:18:59 crc kubenswrapper[5045]: W1125 23:18:59.640019 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7909b434_b7ea_46af_8c4e_b5454df0ba0f.slice/crio-52e7926ea271f835d5c24dcae5e9f27526205e4d2ede99b02c5e97f2620cc6a3 WatchSource:0}: Error finding container 52e7926ea271f835d5c24dcae5e9f27526205e4d2ede99b02c5e97f2620cc6a3: Status 404 returned error can't find the container with id 52e7926ea271f835d5c24dcae5e9f27526205e4d2ede99b02c5e97f2620cc6a3 Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.750102 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.810010 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-65cddd5cf6-fpz78" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.923039 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.923420 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7909b434-b7ea-46af-8c4e-b5454df0ba0f","Type":"ContainerStarted","Data":"52e7926ea271f835d5c24dcae5e9f27526205e4d2ede99b02c5e97f2620cc6a3"} Nov 25 23:18:59 crc kubenswrapper[5045]: I1125 23:18:59.925680 5045 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" podUID="7909b434-b7ea-46af-8c4e-b5454df0ba0f" Nov 25 23:19:00 crc kubenswrapper[5045]: I1125 23:19:00.368106 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 23:19:00 crc kubenswrapper[5045]: I1125 23:19:00.405542 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c" path="/var/lib/kubelet/pods/a9d76f91-8f15-4dd2-8eaf-0cc5c767d64c/volumes" Nov 25 23:19:04 crc kubenswrapper[5045]: I1125 23:19:04.971516 5045 generic.go:334] "Generic (PLEG): container finished" podID="d264a9e9-964e-4764-bb10-466754e4e77a" containerID="ebb1af0ccae336e80c7a28af9f1cbfabfbf9365cda1c3c3fd6bb1542dc9ed599" exitCode=137 Nov 25 23:19:04 crc kubenswrapper[5045]: I1125 23:19:04.971704 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerDied","Data":"ebb1af0ccae336e80c7a28af9f1cbfabfbf9365cda1c3c3fd6bb1542dc9ed599"} Nov 25 23:19:09 crc kubenswrapper[5045]: I1125 23:19:09.017718 5045 generic.go:334] "Generic (PLEG): container finished" podID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerID="76e8df49f72be3137a2d869a5c038b69082782bfc66404c7cc4e441fed0d217a" exitCode=137 Nov 25 23:19:09 crc kubenswrapper[5045]: I1125 23:19:09.017736 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9b9340dd-6c33-4750-9fdd-5c588805522a","Type":"ContainerDied","Data":"76e8df49f72be3137a2d869a5c038b69082782bfc66404c7cc4e441fed0d217a"} Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.607063 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.670035 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.697269 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data-custom\") pod \"9b9340dd-6c33-4750-9fdd-5c588805522a\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.697367 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-scripts\") pod \"9b9340dd-6c33-4750-9fdd-5c588805522a\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.697395 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data\") pod \"9b9340dd-6c33-4750-9fdd-5c588805522a\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.697438 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrj78\" (UniqueName: \"kubernetes.io/projected/9b9340dd-6c33-4750-9fdd-5c588805522a-kube-api-access-vrj78\") pod \"9b9340dd-6c33-4750-9fdd-5c588805522a\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.697489 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9b9340dd-6c33-4750-9fdd-5c588805522a-etc-machine-id\") pod \"9b9340dd-6c33-4750-9fdd-5c588805522a\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.697589 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-combined-ca-bundle\") pod \"9b9340dd-6c33-4750-9fdd-5c588805522a\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.697607 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b9340dd-6c33-4750-9fdd-5c588805522a-logs\") pod \"9b9340dd-6c33-4750-9fdd-5c588805522a\" (UID: \"9b9340dd-6c33-4750-9fdd-5c588805522a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.698525 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b9340dd-6c33-4750-9fdd-5c588805522a-logs" (OuterVolumeSpecName: "logs") pod "9b9340dd-6c33-4750-9fdd-5c588805522a" (UID: "9b9340dd-6c33-4750-9fdd-5c588805522a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.700074 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b9340dd-6c33-4750-9fdd-5c588805522a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9b9340dd-6c33-4750-9fdd-5c588805522a" (UID: "9b9340dd-6c33-4750-9fdd-5c588805522a"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.710919 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-scripts" (OuterVolumeSpecName: "scripts") pod "9b9340dd-6c33-4750-9fdd-5c588805522a" (UID: "9b9340dd-6c33-4750-9fdd-5c588805522a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.712909 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9b9340dd-6c33-4750-9fdd-5c588805522a" (UID: "9b9340dd-6c33-4750-9fdd-5c588805522a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.712967 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b9340dd-6c33-4750-9fdd-5c588805522a-kube-api-access-vrj78" (OuterVolumeSpecName: "kube-api-access-vrj78") pod "9b9340dd-6c33-4750-9fdd-5c588805522a" (UID: "9b9340dd-6c33-4750-9fdd-5c588805522a"). InnerVolumeSpecName "kube-api-access-vrj78". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.729398 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b9340dd-6c33-4750-9fdd-5c588805522a" (UID: "9b9340dd-6c33-4750-9fdd-5c588805522a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.794735 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data" (OuterVolumeSpecName: "config-data") pod "9b9340dd-6c33-4750-9fdd-5c588805522a" (UID: "9b9340dd-6c33-4750-9fdd-5c588805522a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801081 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-combined-ca-bundle\") pod \"d264a9e9-964e-4764-bb10-466754e4e77a\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801158 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-run-httpd\") pod \"d264a9e9-964e-4764-bb10-466754e4e77a\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801200 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-log-httpd\") pod \"d264a9e9-964e-4764-bb10-466754e4e77a\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801233 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-scripts\") pod \"d264a9e9-964e-4764-bb10-466754e4e77a\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801298 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-sg-core-conf-yaml\") pod \"d264a9e9-964e-4764-bb10-466754e4e77a\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801350 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cm8nq\" (UniqueName: \"kubernetes.io/projected/d264a9e9-964e-4764-bb10-466754e4e77a-kube-api-access-cm8nq\") pod \"d264a9e9-964e-4764-bb10-466754e4e77a\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801380 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-config-data\") pod \"d264a9e9-964e-4764-bb10-466754e4e77a\" (UID: \"d264a9e9-964e-4764-bb10-466754e4e77a\") " Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801727 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801742 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801750 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrj78\" (UniqueName: \"kubernetes.io/projected/9b9340dd-6c33-4750-9fdd-5c588805522a-kube-api-access-vrj78\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801760 5045 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9b9340dd-6c33-4750-9fdd-5c588805522a-etc-machine-id\") on node \"crc\" 
DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801768 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801777 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b9340dd-6c33-4750-9fdd-5c588805522a-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.801784 5045 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b9340dd-6c33-4750-9fdd-5c588805522a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.802151 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d264a9e9-964e-4764-bb10-466754e4e77a" (UID: "d264a9e9-964e-4764-bb10-466754e4e77a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.806109 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d264a9e9-964e-4764-bb10-466754e4e77a" (UID: "d264a9e9-964e-4764-bb10-466754e4e77a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.815380 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-scripts" (OuterVolumeSpecName: "scripts") pod "d264a9e9-964e-4764-bb10-466754e4e77a" (UID: "d264a9e9-964e-4764-bb10-466754e4e77a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.828946 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d264a9e9-964e-4764-bb10-466754e4e77a-kube-api-access-cm8nq" (OuterVolumeSpecName: "kube-api-access-cm8nq") pod "d264a9e9-964e-4764-bb10-466754e4e77a" (UID: "d264a9e9-964e-4764-bb10-466754e4e77a"). InnerVolumeSpecName "kube-api-access-cm8nq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.898843 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d264a9e9-964e-4764-bb10-466754e4e77a" (UID: "d264a9e9-964e-4764-bb10-466754e4e77a"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.904806 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.904838 5045 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.904848 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cm8nq\" (UniqueName: \"kubernetes.io/projected/d264a9e9-964e-4764-bb10-466754e4e77a-kube-api-access-cm8nq\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.904856 5045 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.904863 5045 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d264a9e9-964e-4764-bb10-466754e4e77a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.926918 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d264a9e9-964e-4764-bb10-466754e4e77a" (UID: "d264a9e9-964e-4764-bb10-466754e4e77a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:10 crc kubenswrapper[5045]: I1125 23:19:10.957667 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-config-data" (OuterVolumeSpecName: "config-data") pod "d264a9e9-964e-4764-bb10-466754e4e77a" (UID: "d264a9e9-964e-4764-bb10-466754e4e77a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.006826 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.006856 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d264a9e9-964e-4764-bb10-466754e4e77a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.041437 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9b9340dd-6c33-4750-9fdd-5c588805522a","Type":"ContainerDied","Data":"83ba3f00b3e5d5984fcc1d326f03f8ba24b12b7e8061af2872de997c386237c8"} Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.041460 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.041494 5045 scope.go:117] "RemoveContainer" containerID="76e8df49f72be3137a2d869a5c038b69082782bfc66404c7cc4e441fed0d217a" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.042933 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7909b434-b7ea-46af-8c4e-b5454df0ba0f","Type":"ContainerStarted","Data":"246c5d2b91d190f61d51108ff4df42dad982331a7bc33b3b0822cc9e9bcc9d0a"} Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.046595 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d264a9e9-964e-4764-bb10-466754e4e77a","Type":"ContainerDied","Data":"7cc38552084b60007175c36f80e498f6cfd23b5112c9ece206e8398c46d20552"} Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.046959 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.080902 5045 scope.go:117] "RemoveContainer" containerID="e8315c17d0c389578cbc69011a4ac66311cc92d437dd36c918828abf7bf9b5a2" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.105627 5045 scope.go:117] "RemoveContainer" containerID="ebb1af0ccae336e80c7a28af9f1cbfabfbf9365cda1c3c3fd6bb1542dc9ed599" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.120922 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.402229641 podStartE2EDuration="13.12090152s" podCreationTimestamp="2025-11-25 23:18:58 +0000 UTC" firstStartedPulling="2025-11-25 23:18:59.641873566 +0000 UTC m=+1195.999532678" lastFinishedPulling="2025-11-25 23:19:10.360545435 +0000 UTC m=+1206.718204557" observedRunningTime="2025-11-25 23:19:11.081397085 +0000 UTC m=+1207.439056207" watchObservedRunningTime="2025-11-25 23:19:11.12090152 +0000 UTC m=+1207.478560622" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.132574 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.149477 5045 scope.go:117] "RemoveContainer" containerID="6c90ba0300d881d80fc270ac5bd982676a4eb5a0535df1f5587476ba47223757" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.155853 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.163853 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 23:19:11 crc kubenswrapper[5045]: E1125 23:19:11.164292 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="ceilometer-notification-agent" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164304 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="ceilometer-notification-agent" Nov 25 23:19:11 crc kubenswrapper[5045]: E1125 23:19:11.164323 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerName="cinder-api" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164330 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerName="cinder-api" Nov 25 23:19:11 crc kubenswrapper[5045]: E1125 23:19:11.164345 5045 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="proxy-httpd" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164351 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="proxy-httpd" Nov 25 23:19:11 crc kubenswrapper[5045]: E1125 23:19:11.164364 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="ceilometer-central-agent" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164369 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="ceilometer-central-agent" Nov 25 23:19:11 crc kubenswrapper[5045]: E1125 23:19:11.164380 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerName="cinder-api-log" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164385 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerName="cinder-api-log" Nov 25 23:19:11 crc kubenswrapper[5045]: E1125 23:19:11.164395 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="sg-core" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164401 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="sg-core" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164560 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="proxy-httpd" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164576 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerName="cinder-api-log" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164593 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="sg-core" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164603 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="ceilometer-notification-agent" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164613 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" containerName="cinder-api" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.164625 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" containerName="ceilometer-central-agent" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.166135 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.169161 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.169364 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.169486 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.179953 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.196240 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.204297 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.211040 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.213100 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.215241 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.215430 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.243121 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.252866 5045 scope.go:117] "RemoveContainer" containerID="bdec37632121d3535f3dfdb6b0610b5374b313598ecb6fcf11b0ddb7a5f880eb" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.268702 5045 scope.go:117] "RemoveContainer" containerID="d7f1317ff1ae899968a4be7a67d042903b769b8e9101e48303f747617094e99d" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.311897 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-log-httpd\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.311985 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/945c5d6c-f96a-4d6c-a78d-795e26a25699-etc-machine-id\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312024 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-config-data\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312047 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-internal-tls-certs\") pod 
\"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312063 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-config-data\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312189 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312310 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8zt8\" (UniqueName: \"kubernetes.io/projected/945c5d6c-f96a-4d6c-a78d-795e26a25699-kube-api-access-k8zt8\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312407 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvc9w\" (UniqueName: \"kubernetes.io/projected/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-kube-api-access-bvc9w\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312457 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-scripts\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312491 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-scripts\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312567 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-config-data-custom\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312596 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/945c5d6c-f96a-4d6c-a78d-795e26a25699-logs\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312618 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-run-httpd\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312644 5045 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-public-tls-certs\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312662 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.312681 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414695 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414754 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-log-httpd\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414779 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/945c5d6c-f96a-4d6c-a78d-795e26a25699-etc-machine-id\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414808 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-config-data\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414831 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414846 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-config-data\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414872 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc 
kubenswrapper[5045]: I1125 23:19:11.414887 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8zt8\" (UniqueName: \"kubernetes.io/projected/945c5d6c-f96a-4d6c-a78d-795e26a25699-kube-api-access-k8zt8\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414908 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvc9w\" (UniqueName: \"kubernetes.io/projected/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-kube-api-access-bvc9w\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414926 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-scripts\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414944 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-scripts\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.414989 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-config-data-custom\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.415010 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/945c5d6c-f96a-4d6c-a78d-795e26a25699-logs\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.415026 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-run-httpd\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.415051 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-public-tls-certs\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.415065 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.416033 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/945c5d6c-f96a-4d6c-a78d-795e26a25699-etc-machine-id\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc 
kubenswrapper[5045]: I1125 23:19:11.416384 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-run-httpd\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.416478 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/945c5d6c-f96a-4d6c-a78d-795e26a25699-logs\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.419058 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-config-data-custom\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.419809 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-config-data\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.420010 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.420824 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-scripts\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.424047 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-log-httpd\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.431775 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.433160 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-scripts\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.437940 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.439087 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-config-data\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.446853 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.448747 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvc9w\" (UniqueName: \"kubernetes.io/projected/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-kube-api-access-bvc9w\") pod \"ceilometer-0\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " pod="openstack/ceilometer-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.449246 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/945c5d6c-f96a-4d6c-a78d-795e26a25699-public-tls-certs\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.455668 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8zt8\" (UniqueName: \"kubernetes.io/projected/945c5d6c-f96a-4d6c-a78d-795e26a25699-kube-api-access-k8zt8\") pod \"cinder-api-0\" (UID: \"945c5d6c-f96a-4d6c-a78d-795e26a25699\") " pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.557742 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 23:19:11 crc kubenswrapper[5045]: I1125 23:19:11.566990 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.021879 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:12 crc kubenswrapper[5045]: W1125 23:19:12.026139 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d7c106e_ad01_45ac_ad3e_992ba8dc500a.slice/crio-052d9c7e77063996fc4e7b87e8b0a3b2be9bfb266fa1e789b9b8c54f4f0c88e0 WatchSource:0}: Error finding container 052d9c7e77063996fc4e7b87e8b0a3b2be9bfb266fa1e789b9b8c54f4f0c88e0: Status 404 returned error can't find the container with id 052d9c7e77063996fc4e7b87e8b0a3b2be9bfb266fa1e789b9b8c54f4f0c88e0 Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.060493 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerStarted","Data":"052d9c7e77063996fc4e7b87e8b0a3b2be9bfb266fa1e789b9b8c54f4f0c88e0"} Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.121828 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.412682 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b9340dd-6c33-4750-9fdd-5c588805522a" path="/var/lib/kubelet/pods/9b9340dd-6c33-4750-9fdd-5c588805522a/volumes" Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.413606 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d264a9e9-964e-4764-bb10-466754e4e77a" path="/var/lib/kubelet/pods/d264a9e9-964e-4764-bb10-466754e4e77a/volumes" Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.871501 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-7p9hc"] Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.873101 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.880938 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-7p9hc"] Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.943073 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvrxx\" (UniqueName: \"kubernetes.io/projected/ce8b6926-27cc-4df4-86eb-53a9a17e548d-kube-api-access-nvrxx\") pod \"nova-api-db-create-7p9hc\" (UID: \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\") " pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.943176 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce8b6926-27cc-4df4-86eb-53a9a17e548d-operator-scripts\") pod \"nova-api-db-create-7p9hc\" (UID: \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\") " pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.978364 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-wzkt8"] Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.979838 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.998624 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f20f-account-create-update-mn9dj"] Nov 25 23:19:12 crc kubenswrapper[5045]: I1125 23:19:12.999844 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.002096 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.005486 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wzkt8"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.012930 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f20f-account-create-update-mn9dj"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.045167 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce8b6926-27cc-4df4-86eb-53a9a17e548d-operator-scripts\") pod \"nova-api-db-create-7p9hc\" (UID: \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\") " pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.045302 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f627043-0efd-4c7f-a7e6-16a2d5b12650-operator-scripts\") pod \"nova-cell0-db-create-wzkt8\" (UID: \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\") " pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.045365 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbvhk\" (UniqueName: \"kubernetes.io/projected/0f627043-0efd-4c7f-a7e6-16a2d5b12650-kube-api-access-rbvhk\") pod \"nova-cell0-db-create-wzkt8\" (UID: \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\") " pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.045413 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvrxx\" (UniqueName: \"kubernetes.io/projected/ce8b6926-27cc-4df4-86eb-53a9a17e548d-kube-api-access-nvrxx\") pod \"nova-api-db-create-7p9hc\" (UID: \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\") " pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.047571 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce8b6926-27cc-4df4-86eb-53a9a17e548d-operator-scripts\") pod \"nova-api-db-create-7p9hc\" (UID: \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\") " pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.067577 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvrxx\" (UniqueName: \"kubernetes.io/projected/ce8b6926-27cc-4df4-86eb-53a9a17e548d-kube-api-access-nvrxx\") pod \"nova-api-db-create-7p9hc\" (UID: \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\") " pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.081599 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerStarted","Data":"9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1"} Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.092323 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"945c5d6c-f96a-4d6c-a78d-795e26a25699","Type":"ContainerStarted","Data":"c8488669edc9edb531db776c42d45ab34bdf9597bed0508fc80c65167e5d992e"} Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.092361 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"945c5d6c-f96a-4d6c-a78d-795e26a25699","Type":"ContainerStarted","Data":"b05f78859625826e5fd29bc00cf2e184348f962ddcbf93131f49f96e8a35028d"} Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.127836 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.149699 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbvhk\" (UniqueName: \"kubernetes.io/projected/0f627043-0efd-4c7f-a7e6-16a2d5b12650-kube-api-access-rbvhk\") pod \"nova-cell0-db-create-wzkt8\" (UID: \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\") " pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.149815 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2xhz\" (UniqueName: \"kubernetes.io/projected/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-kube-api-access-z2xhz\") pod \"nova-api-f20f-account-create-update-mn9dj\" (UID: \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\") " pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.149894 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f627043-0efd-4c7f-a7e6-16a2d5b12650-operator-scripts\") pod \"nova-cell0-db-create-wzkt8\" (UID: \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\") " pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.149911 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-operator-scripts\") pod \"nova-api-f20f-account-create-update-mn9dj\" (UID: \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\") " pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.150641 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f627043-0efd-4c7f-a7e6-16a2d5b12650-operator-scripts\") pod \"nova-cell0-db-create-wzkt8\" (UID: \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\") " pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.172228 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-wscvj"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.173468 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.184916 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-wscvj"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.191416 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbvhk\" (UniqueName: \"kubernetes.io/projected/0f627043-0efd-4c7f-a7e6-16a2d5b12650-kube-api-access-rbvhk\") pod \"nova-cell0-db-create-wzkt8\" (UID: \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\") " pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.197352 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-5376-account-create-update-r9q8j"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.197796 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.198701 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.200502 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.216854 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-5376-account-create-update-r9q8j"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.251793 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-operator-scripts\") pod \"nova-api-f20f-account-create-update-mn9dj\" (UID: \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\") " pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.251840 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5nb9\" (UniqueName: \"kubernetes.io/projected/983ae039-683a-49dc-a8e6-cfe305174a70-kube-api-access-n5nb9\") pod \"nova-cell1-db-create-wscvj\" (UID: \"983ae039-683a-49dc-a8e6-cfe305174a70\") " pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.251923 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2xhz\" (UniqueName: \"kubernetes.io/projected/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-kube-api-access-z2xhz\") pod \"nova-api-f20f-account-create-update-mn9dj\" (UID: \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\") " pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.251956 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/983ae039-683a-49dc-a8e6-cfe305174a70-operator-scripts\") pod \"nova-cell1-db-create-wscvj\" (UID: \"983ae039-683a-49dc-a8e6-cfe305174a70\") " pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.252614 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-operator-scripts\") pod \"nova-api-f20f-account-create-update-mn9dj\" (UID: \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\") " 
pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.277409 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2xhz\" (UniqueName: \"kubernetes.io/projected/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-kube-api-access-z2xhz\") pod \"nova-api-f20f-account-create-update-mn9dj\" (UID: \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\") " pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.315118 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.322922 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.356105 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8rb9\" (UniqueName: \"kubernetes.io/projected/8502cc42-8be4-4cd2-b329-91f28be89ac8-kube-api-access-r8rb9\") pod \"nova-cell0-5376-account-create-update-r9q8j\" (UID: \"8502cc42-8be4-4cd2-b329-91f28be89ac8\") " pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.356198 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5nb9\" (UniqueName: \"kubernetes.io/projected/983ae039-683a-49dc-a8e6-cfe305174a70-kube-api-access-n5nb9\") pod \"nova-cell1-db-create-wscvj\" (UID: \"983ae039-683a-49dc-a8e6-cfe305174a70\") " pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.356264 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8502cc42-8be4-4cd2-b329-91f28be89ac8-operator-scripts\") pod \"nova-cell0-5376-account-create-update-r9q8j\" (UID: \"8502cc42-8be4-4cd2-b329-91f28be89ac8\") " pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.356352 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/983ae039-683a-49dc-a8e6-cfe305174a70-operator-scripts\") pod \"nova-cell1-db-create-wscvj\" (UID: \"983ae039-683a-49dc-a8e6-cfe305174a70\") " pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.357151 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/983ae039-683a-49dc-a8e6-cfe305174a70-operator-scripts\") pod \"nova-cell1-db-create-wscvj\" (UID: \"983ae039-683a-49dc-a8e6-cfe305174a70\") " pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.398436 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5nb9\" (UniqueName: \"kubernetes.io/projected/983ae039-683a-49dc-a8e6-cfe305174a70-kube-api-access-n5nb9\") pod \"nova-cell1-db-create-wscvj\" (UID: \"983ae039-683a-49dc-a8e6-cfe305174a70\") " pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.403778 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-03db-account-create-update-697t8"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.405000 
5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.407076 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.459061 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8502cc42-8be4-4cd2-b329-91f28be89ac8-operator-scripts\") pod \"nova-cell0-5376-account-create-update-r9q8j\" (UID: \"8502cc42-8be4-4cd2-b329-91f28be89ac8\") " pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.459692 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8502cc42-8be4-4cd2-b329-91f28be89ac8-operator-scripts\") pod \"nova-cell0-5376-account-create-update-r9q8j\" (UID: \"8502cc42-8be4-4cd2-b329-91f28be89ac8\") " pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.460128 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8rb9\" (UniqueName: \"kubernetes.io/projected/8502cc42-8be4-4cd2-b329-91f28be89ac8-kube-api-access-r8rb9\") pod \"nova-cell0-5376-account-create-update-r9q8j\" (UID: \"8502cc42-8be4-4cd2-b329-91f28be89ac8\") " pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.463391 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-03db-account-create-update-697t8"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.481373 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8rb9\" (UniqueName: \"kubernetes.io/projected/8502cc42-8be4-4cd2-b329-91f28be89ac8-kube-api-access-r8rb9\") pod \"nova-cell0-5376-account-create-update-r9q8j\" (UID: \"8502cc42-8be4-4cd2-b329-91f28be89ac8\") " pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.561501 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.562347 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bafa51d2-8844-41d5-ad62-5000cb9f18e5-operator-scripts\") pod \"nova-cell1-03db-account-create-update-697t8\" (UID: \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\") " pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.562388 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjrjp\" (UniqueName: \"kubernetes.io/projected/bafa51d2-8844-41d5-ad62-5000cb9f18e5-kube-api-access-mjrjp\") pod \"nova-cell1-03db-account-create-update-697t8\" (UID: \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\") " pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.624273 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.663535 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bafa51d2-8844-41d5-ad62-5000cb9f18e5-operator-scripts\") pod \"nova-cell1-03db-account-create-update-697t8\" (UID: \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\") " pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.663587 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjrjp\" (UniqueName: \"kubernetes.io/projected/bafa51d2-8844-41d5-ad62-5000cb9f18e5-kube-api-access-mjrjp\") pod \"nova-cell1-03db-account-create-update-697t8\" (UID: \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\") " pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.665212 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bafa51d2-8844-41d5-ad62-5000cb9f18e5-operator-scripts\") pod \"nova-cell1-03db-account-create-update-697t8\" (UID: \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\") " pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.685590 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjrjp\" (UniqueName: \"kubernetes.io/projected/bafa51d2-8844-41d5-ad62-5000cb9f18e5-kube-api-access-mjrjp\") pod \"nova-cell1-03db-account-create-update-697t8\" (UID: \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\") " pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.746692 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.755571 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-7p9hc"] Nov 25 23:19:13 crc kubenswrapper[5045]: I1125 23:19:13.933230 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wzkt8"] Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.060531 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f20f-account-create-update-mn9dj"] Nov 25 23:19:14 crc kubenswrapper[5045]: W1125 23:19:14.064502 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc9f74e5_fe2e_4aee_924e_e73fd9e9ef2f.slice/crio-979e9fb248967ac21352ffdd6d57d0e49f2858dbf4e392f43a03d95ad0f71ad0 WatchSource:0}: Error finding container 979e9fb248967ac21352ffdd6d57d0e49f2858dbf4e392f43a03d95ad0f71ad0: Status 404 returned error can't find the container with id 979e9fb248967ac21352ffdd6d57d0e49f2858dbf4e392f43a03d95ad0f71ad0 Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.105150 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-7p9hc" event={"ID":"ce8b6926-27cc-4df4-86eb-53a9a17e548d","Type":"ContainerStarted","Data":"e21614a740586314e5b6619f53a4219515e19f23d857587f4f4fc3df9b5284dc"} Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.105205 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-7p9hc" event={"ID":"ce8b6926-27cc-4df4-86eb-53a9a17e548d","Type":"ContainerStarted","Data":"b4e7314cb61e21cd262cbcf174527f992253809138e9b9e37b120f363543b9ce"} Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.113502 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"945c5d6c-f96a-4d6c-a78d-795e26a25699","Type":"ContainerStarted","Data":"6a96350385d404dba3c634bf0010721d89625d1634780c83212e283c5c2bb9c6"} Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.113808 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.115222 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f20f-account-create-update-mn9dj" event={"ID":"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f","Type":"ContainerStarted","Data":"979e9fb248967ac21352ffdd6d57d0e49f2858dbf4e392f43a03d95ad0f71ad0"} Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.117430 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wzkt8" event={"ID":"0f627043-0efd-4c7f-a7e6-16a2d5b12650","Type":"ContainerStarted","Data":"1035f342dbcba546900f51c236032af8595773f05c5f1bd520758b931bc08f1a"} Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.119512 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerStarted","Data":"63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d"} Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.127424 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-7p9hc" podStartSLOduration=2.127405794 podStartE2EDuration="2.127405794s" podCreationTimestamp="2025-11-25 23:19:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-25 23:19:14.119641609 +0000 UTC m=+1210.477300721" watchObservedRunningTime="2025-11-25 23:19:14.127405794 +0000 UTC m=+1210.485064906" Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.139250 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.139233387 podStartE2EDuration="3.139233387s" podCreationTimestamp="2025-11-25 23:19:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:19:14.13796529 +0000 UTC m=+1210.495624402" watchObservedRunningTime="2025-11-25 23:19:14.139233387 +0000 UTC m=+1210.496892499" Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.257448 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-wscvj"] Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.274171 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-5376-account-create-update-r9q8j"] Nov 25 23:19:14 crc kubenswrapper[5045]: I1125 23:19:14.326672 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-03db-account-create-update-697t8"] Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.131362 5045 generic.go:334] "Generic (PLEG): container finished" podID="ce8b6926-27cc-4df4-86eb-53a9a17e548d" containerID="e21614a740586314e5b6619f53a4219515e19f23d857587f4f4fc3df9b5284dc" exitCode=0 Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.131405 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-7p9hc" event={"ID":"ce8b6926-27cc-4df4-86eb-53a9a17e548d","Type":"ContainerDied","Data":"e21614a740586314e5b6619f53a4219515e19f23d857587f4f4fc3df9b5284dc"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.138537 5045 generic.go:334] "Generic (PLEG): container finished" podID="0f627043-0efd-4c7f-a7e6-16a2d5b12650" containerID="66115cabeaef3327c6ca5397c7dc11e8c6951dad1b51d63957e85eecca97dd3c" exitCode=0 Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.138613 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wzkt8" event={"ID":"0f627043-0efd-4c7f-a7e6-16a2d5b12650","Type":"ContainerDied","Data":"66115cabeaef3327c6ca5397c7dc11e8c6951dad1b51d63957e85eecca97dd3c"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.157242 5045 generic.go:334] "Generic (PLEG): container finished" podID="983ae039-683a-49dc-a8e6-cfe305174a70" containerID="bb06757e8d16196b52789a3af42cc911e546ff88ed70ad4c9b5fea47627b4589" exitCode=0 Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.157329 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-wscvj" event={"ID":"983ae039-683a-49dc-a8e6-cfe305174a70","Type":"ContainerDied","Data":"bb06757e8d16196b52789a3af42cc911e546ff88ed70ad4c9b5fea47627b4589"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.157355 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-wscvj" event={"ID":"983ae039-683a-49dc-a8e6-cfe305174a70","Type":"ContainerStarted","Data":"11bc4e685fda90aa29532921620533ea7185f3d1151a6eaa46a4e543729de814"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.159588 5045 generic.go:334] "Generic (PLEG): container finished" podID="8502cc42-8be4-4cd2-b329-91f28be89ac8" containerID="18309d55735010527c49a18dffbf05debe914d26c010cbd48eb9cfc556f26120" exitCode=0 Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 
23:19:15.159695 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-5376-account-create-update-r9q8j" event={"ID":"8502cc42-8be4-4cd2-b329-91f28be89ac8","Type":"ContainerDied","Data":"18309d55735010527c49a18dffbf05debe914d26c010cbd48eb9cfc556f26120"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.159745 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-5376-account-create-update-r9q8j" event={"ID":"8502cc42-8be4-4cd2-b329-91f28be89ac8","Type":"ContainerStarted","Data":"e2b5e4e7fe8556d3a073c22a5a1c5f2dc774d79869e8c58c8457a2db6e373d55"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.167682 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerStarted","Data":"76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.169009 5045 generic.go:334] "Generic (PLEG): container finished" podID="bafa51d2-8844-41d5-ad62-5000cb9f18e5" containerID="edf42132a733929c3dbc71af34e742f6637a31313ecb5546c9fb80dec797b63c" exitCode=0 Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.169042 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-03db-account-create-update-697t8" event={"ID":"bafa51d2-8844-41d5-ad62-5000cb9f18e5","Type":"ContainerDied","Data":"edf42132a733929c3dbc71af34e742f6637a31313ecb5546c9fb80dec797b63c"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.169058 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-03db-account-create-update-697t8" event={"ID":"bafa51d2-8844-41d5-ad62-5000cb9f18e5","Type":"ContainerStarted","Data":"c5b6af8a3d950571d216b9252718bd64bfc51708ca76946b4a30e722cd24cf5a"} Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.182876 5045 generic.go:334] "Generic (PLEG): container finished" podID="dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f" containerID="65fd06be77de57381a987b2759a4eab64c44932e5851903bda61639acc2dd21f" exitCode=0 Nov 25 23:19:15 crc kubenswrapper[5045]: I1125 23:19:15.182907 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f20f-account-create-update-mn9dj" event={"ID":"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f","Type":"ContainerDied","Data":"65fd06be77de57381a987b2759a4eab64c44932e5851903bda61639acc2dd21f"} Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.602087 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.758883 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.767369 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5nb9\" (UniqueName: \"kubernetes.io/projected/983ae039-683a-49dc-a8e6-cfe305174a70-kube-api-access-n5nb9\") pod \"983ae039-683a-49dc-a8e6-cfe305174a70\" (UID: \"983ae039-683a-49dc-a8e6-cfe305174a70\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.767481 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/983ae039-683a-49dc-a8e6-cfe305174a70-operator-scripts\") pod \"983ae039-683a-49dc-a8e6-cfe305174a70\" (UID: \"983ae039-683a-49dc-a8e6-cfe305174a70\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.768380 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/983ae039-683a-49dc-a8e6-cfe305174a70-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "983ae039-683a-49dc-a8e6-cfe305174a70" (UID: "983ae039-683a-49dc-a8e6-cfe305174a70"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.769380 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.773480 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/983ae039-683a-49dc-a8e6-cfe305174a70-kube-api-access-n5nb9" (OuterVolumeSpecName: "kube-api-access-n5nb9") pod "983ae039-683a-49dc-a8e6-cfe305174a70" (UID: "983ae039-683a-49dc-a8e6-cfe305174a70"). InnerVolumeSpecName "kube-api-access-n5nb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.777218 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.848678 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.869284 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8rb9\" (UniqueName: \"kubernetes.io/projected/8502cc42-8be4-4cd2-b329-91f28be89ac8-kube-api-access-r8rb9\") pod \"8502cc42-8be4-4cd2-b329-91f28be89ac8\" (UID: \"8502cc42-8be4-4cd2-b329-91f28be89ac8\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.869539 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bafa51d2-8844-41d5-ad62-5000cb9f18e5-operator-scripts\") pod \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\" (UID: \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.869597 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8502cc42-8be4-4cd2-b329-91f28be89ac8-operator-scripts\") pod \"8502cc42-8be4-4cd2-b329-91f28be89ac8\" (UID: \"8502cc42-8be4-4cd2-b329-91f28be89ac8\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.869659 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjrjp\" (UniqueName: \"kubernetes.io/projected/bafa51d2-8844-41d5-ad62-5000cb9f18e5-kube-api-access-mjrjp\") pod \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\" (UID: \"bafa51d2-8844-41d5-ad62-5000cb9f18e5\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.871218 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bafa51d2-8844-41d5-ad62-5000cb9f18e5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bafa51d2-8844-41d5-ad62-5000cb9f18e5" (UID: "bafa51d2-8844-41d5-ad62-5000cb9f18e5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.873246 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8502cc42-8be4-4cd2-b329-91f28be89ac8-kube-api-access-r8rb9" (OuterVolumeSpecName: "kube-api-access-r8rb9") pod "8502cc42-8be4-4cd2-b329-91f28be89ac8" (UID: "8502cc42-8be4-4cd2-b329-91f28be89ac8"). InnerVolumeSpecName "kube-api-access-r8rb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.874415 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8502cc42-8be4-4cd2-b329-91f28be89ac8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8502cc42-8be4-4cd2-b329-91f28be89ac8" (UID: "8502cc42-8be4-4cd2-b329-91f28be89ac8"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.877487 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/983ae039-683a-49dc-a8e6-cfe305174a70-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.877575 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bafa51d2-8844-41d5-ad62-5000cb9f18e5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.877642 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bafa51d2-8844-41d5-ad62-5000cb9f18e5-kube-api-access-mjrjp" (OuterVolumeSpecName: "kube-api-access-mjrjp") pod "bafa51d2-8844-41d5-ad62-5000cb9f18e5" (UID: "bafa51d2-8844-41d5-ad62-5000cb9f18e5"). InnerVolumeSpecName "kube-api-access-mjrjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.877655 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8502cc42-8be4-4cd2-b329-91f28be89ac8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.877751 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8rb9\" (UniqueName: \"kubernetes.io/projected/8502cc42-8be4-4cd2-b329-91f28be89ac8-kube-api-access-r8rb9\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.877826 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5nb9\" (UniqueName: \"kubernetes.io/projected/983ae039-683a-49dc-a8e6-cfe305174a70-kube-api-access-n5nb9\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.891177 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.979398 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvrxx\" (UniqueName: \"kubernetes.io/projected/ce8b6926-27cc-4df4-86eb-53a9a17e548d-kube-api-access-nvrxx\") pod \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\" (UID: \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.979469 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-operator-scripts\") pod \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\" (UID: \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.979542 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2xhz\" (UniqueName: \"kubernetes.io/projected/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-kube-api-access-z2xhz\") pod \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\" (UID: \"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.979701 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce8b6926-27cc-4df4-86eb-53a9a17e548d-operator-scripts\") pod \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\" (UID: \"ce8b6926-27cc-4df4-86eb-53a9a17e548d\") " Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.980172 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjrjp\" (UniqueName: \"kubernetes.io/projected/bafa51d2-8844-41d5-ad62-5000cb9f18e5-kube-api-access-mjrjp\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.980407 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f" (UID: "dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.980693 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce8b6926-27cc-4df4-86eb-53a9a17e548d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ce8b6926-27cc-4df4-86eb-53a9a17e548d" (UID: "ce8b6926-27cc-4df4-86eb-53a9a17e548d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.983853 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce8b6926-27cc-4df4-86eb-53a9a17e548d-kube-api-access-nvrxx" (OuterVolumeSpecName: "kube-api-access-nvrxx") pod "ce8b6926-27cc-4df4-86eb-53a9a17e548d" (UID: "ce8b6926-27cc-4df4-86eb-53a9a17e548d"). InnerVolumeSpecName "kube-api-access-nvrxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:16 crc kubenswrapper[5045]: I1125 23:19:16.983890 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-kube-api-access-z2xhz" (OuterVolumeSpecName: "kube-api-access-z2xhz") pod "dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f" (UID: "dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f"). 
InnerVolumeSpecName "kube-api-access-z2xhz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.081743 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f627043-0efd-4c7f-a7e6-16a2d5b12650-operator-scripts\") pod \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\" (UID: \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\") " Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.081953 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbvhk\" (UniqueName: \"kubernetes.io/projected/0f627043-0efd-4c7f-a7e6-16a2d5b12650-kube-api-access-rbvhk\") pod \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\" (UID: \"0f627043-0efd-4c7f-a7e6-16a2d5b12650\") " Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.082242 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f627043-0efd-4c7f-a7e6-16a2d5b12650-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0f627043-0efd-4c7f-a7e6-16a2d5b12650" (UID: "0f627043-0efd-4c7f-a7e6-16a2d5b12650"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.082635 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvrxx\" (UniqueName: \"kubernetes.io/projected/ce8b6926-27cc-4df4-86eb-53a9a17e548d-kube-api-access-nvrxx\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.082659 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.082672 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2xhz\" (UniqueName: \"kubernetes.io/projected/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f-kube-api-access-z2xhz\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.082686 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f627043-0efd-4c7f-a7e6-16a2d5b12650-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.082696 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce8b6926-27cc-4df4-86eb-53a9a17e548d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.086020 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f627043-0efd-4c7f-a7e6-16a2d5b12650-kube-api-access-rbvhk" (OuterVolumeSpecName: "kube-api-access-rbvhk") pod "0f627043-0efd-4c7f-a7e6-16a2d5b12650" (UID: "0f627043-0efd-4c7f-a7e6-16a2d5b12650"). InnerVolumeSpecName "kube-api-access-rbvhk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.184490 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbvhk\" (UniqueName: \"kubernetes.io/projected/0f627043-0efd-4c7f-a7e6-16a2d5b12650-kube-api-access-rbvhk\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.201598 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wzkt8" event={"ID":"0f627043-0efd-4c7f-a7e6-16a2d5b12650","Type":"ContainerDied","Data":"1035f342dbcba546900f51c236032af8595773f05c5f1bd520758b931bc08f1a"} Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.201642 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1035f342dbcba546900f51c236032af8595773f05c5f1bd520758b931bc08f1a" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.201702 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wzkt8" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.212341 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-wscvj" event={"ID":"983ae039-683a-49dc-a8e6-cfe305174a70","Type":"ContainerDied","Data":"11bc4e685fda90aa29532921620533ea7185f3d1151a6eaa46a4e543729de814"} Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.212389 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11bc4e685fda90aa29532921620533ea7185f3d1151a6eaa46a4e543729de814" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.212462 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-wscvj" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.216142 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-5376-account-create-update-r9q8j" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.216186 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-5376-account-create-update-r9q8j" event={"ID":"8502cc42-8be4-4cd2-b329-91f28be89ac8","Type":"ContainerDied","Data":"e2b5e4e7fe8556d3a073c22a5a1c5f2dc774d79869e8c58c8457a2db6e373d55"} Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.216233 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2b5e4e7fe8556d3a073c22a5a1c5f2dc774d79869e8c58c8457a2db6e373d55" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.219470 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerStarted","Data":"979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4"} Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.219582 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.219593 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="ceilometer-central-agent" containerID="cri-o://9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1" gracePeriod=30 Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.219644 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="proxy-httpd" containerID="cri-o://979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4" gracePeriod=30 Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.219655 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="sg-core" containerID="cri-o://76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644" gracePeriod=30 Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.219897 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="ceilometer-notification-agent" containerID="cri-o://63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d" gracePeriod=30 Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.235223 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-03db-account-create-update-697t8" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.235287 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-03db-account-create-update-697t8" event={"ID":"bafa51d2-8844-41d5-ad62-5000cb9f18e5","Type":"ContainerDied","Data":"c5b6af8a3d950571d216b9252718bd64bfc51708ca76946b4a30e722cd24cf5a"} Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.235316 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5b6af8a3d950571d216b9252718bd64bfc51708ca76946b4a30e722cd24cf5a" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.241975 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f20f-account-create-update-mn9dj" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.241985 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f20f-account-create-update-mn9dj" event={"ID":"dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f","Type":"ContainerDied","Data":"979e9fb248967ac21352ffdd6d57d0e49f2858dbf4e392f43a03d95ad0f71ad0"} Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.244874 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="979e9fb248967ac21352ffdd6d57d0e49f2858dbf4e392f43a03d95ad0f71ad0" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.248386 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-7p9hc" event={"ID":"ce8b6926-27cc-4df4-86eb-53a9a17e548d","Type":"ContainerDied","Data":"b4e7314cb61e21cd262cbcf174527f992253809138e9b9e37b120f363543b9ce"} Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.248425 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4e7314cb61e21cd262cbcf174527f992253809138e9b9e37b120f363543b9ce" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.248436 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-7p9hc" Nov 25 23:19:17 crc kubenswrapper[5045]: I1125 23:19:17.256353 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.235043671 podStartE2EDuration="6.256337868s" podCreationTimestamp="2025-11-25 23:19:11 +0000 UTC" firstStartedPulling="2025-11-25 23:19:12.028099955 +0000 UTC m=+1208.385759067" lastFinishedPulling="2025-11-25 23:19:16.049394152 +0000 UTC m=+1212.407053264" observedRunningTime="2025-11-25 23:19:17.25366578 +0000 UTC m=+1213.611324892" watchObservedRunningTime="2025-11-25 23:19:17.256337868 +0000 UTC m=+1213.613996980" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.259479 5045 generic.go:334] "Generic (PLEG): container finished" podID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerID="979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4" exitCode=0 Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.260268 5045 generic.go:334] "Generic (PLEG): container finished" podID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerID="76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644" exitCode=2 Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.260289 5045 generic.go:334] "Generic (PLEG): container finished" podID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerID="63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d" exitCode=0 Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.259567 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerDied","Data":"979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4"} Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.260329 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerDied","Data":"76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644"} Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.260345 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerDied","Data":"63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d"} Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.547704 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gcz6n"] Nov 25 23:19:18 crc kubenswrapper[5045]: E1125 23:19:18.548159 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8502cc42-8be4-4cd2-b329-91f28be89ac8" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548178 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="8502cc42-8be4-4cd2-b329-91f28be89ac8" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: E1125 23:19:18.548194 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548203 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: E1125 23:19:18.548212 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bafa51d2-8844-41d5-ad62-5000cb9f18e5" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548221 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bafa51d2-8844-41d5-ad62-5000cb9f18e5" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: E1125 23:19:18.548241 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce8b6926-27cc-4df4-86eb-53a9a17e548d" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548249 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce8b6926-27cc-4df4-86eb-53a9a17e548d" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: E1125 23:19:18.548268 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="983ae039-683a-49dc-a8e6-cfe305174a70" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548274 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="983ae039-683a-49dc-a8e6-cfe305174a70" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: E1125 23:19:18.548289 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f627043-0efd-4c7f-a7e6-16a2d5b12650" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548296 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f627043-0efd-4c7f-a7e6-16a2d5b12650" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548488 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="983ae039-683a-49dc-a8e6-cfe305174a70" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548508 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548517 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="8502cc42-8be4-4cd2-b329-91f28be89ac8" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548530 5045 
memory_manager.go:354] "RemoveStaleState removing state" podUID="bafa51d2-8844-41d5-ad62-5000cb9f18e5" containerName="mariadb-account-create-update" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548556 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce8b6926-27cc-4df4-86eb-53a9a17e548d" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.548570 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f627043-0efd-4c7f-a7e6-16a2d5b12650" containerName="mariadb-database-create" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.549270 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.551766 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.552533 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.554076 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-ww4nf" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.556737 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gcz6n"] Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.610030 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-config-data\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.610069 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-scripts\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.610099 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx5m9\" (UniqueName: \"kubernetes.io/projected/1076c0c8-e031-42c0-9978-5ca0d1cbd401-kube-api-access-wx5m9\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.610154 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.711665 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-config-data\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc 
kubenswrapper[5045]: I1125 23:19:18.711910 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-scripts\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.711934 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx5m9\" (UniqueName: \"kubernetes.io/projected/1076c0c8-e031-42c0-9978-5ca0d1cbd401-kube-api-access-wx5m9\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.712296 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.718686 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-scripts\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.724498 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.726912 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-config-data\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.753955 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx5m9\" (UniqueName: \"kubernetes.io/projected/1076c0c8-e031-42c0-9978-5ca0d1cbd401-kube-api-access-wx5m9\") pod \"nova-cell0-conductor-db-sync-gcz6n\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:18 crc kubenswrapper[5045]: I1125 23:19:18.916396 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.011460 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.017006 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-config-data\") pod \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.017077 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-sg-core-conf-yaml\") pod \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.017113 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-scripts\") pod \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.017157 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-run-httpd\") pod \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.017172 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-log-httpd\") pod \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.017188 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvc9w\" (UniqueName: \"kubernetes.io/projected/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-kube-api-access-bvc9w\") pod \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.017213 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-combined-ca-bundle\") pod \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\" (UID: \"3d7c106e-ad01-45ac-ad3e-992ba8dc500a\") " Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.018567 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3d7c106e-ad01-45ac-ad3e-992ba8dc500a" (UID: "3d7c106e-ad01-45ac-ad3e-992ba8dc500a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.018841 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3d7c106e-ad01-45ac-ad3e-992ba8dc500a" (UID: "3d7c106e-ad01-45ac-ad3e-992ba8dc500a"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.021058 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-kube-api-access-bvc9w" (OuterVolumeSpecName: "kube-api-access-bvc9w") pod "3d7c106e-ad01-45ac-ad3e-992ba8dc500a" (UID: "3d7c106e-ad01-45ac-ad3e-992ba8dc500a"). InnerVolumeSpecName "kube-api-access-bvc9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.021839 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-scripts" (OuterVolumeSpecName: "scripts") pod "3d7c106e-ad01-45ac-ad3e-992ba8dc500a" (UID: "3d7c106e-ad01-45ac-ad3e-992ba8dc500a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.048031 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3d7c106e-ad01-45ac-ad3e-992ba8dc500a" (UID: "3d7c106e-ad01-45ac-ad3e-992ba8dc500a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.093220 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d7c106e-ad01-45ac-ad3e-992ba8dc500a" (UID: "3d7c106e-ad01-45ac-ad3e-992ba8dc500a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.118595 5045 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.118624 5045 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.118638 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvc9w\" (UniqueName: \"kubernetes.io/projected/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-kube-api-access-bvc9w\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.118646 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.118656 5045 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.118664 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.127974 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-config-data" (OuterVolumeSpecName: "config-data") pod "3d7c106e-ad01-45ac-ad3e-992ba8dc500a" (UID: "3d7c106e-ad01-45ac-ad3e-992ba8dc500a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.145523 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gcz6n"] Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.220038 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d7c106e-ad01-45ac-ad3e-992ba8dc500a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.272473 5045 generic.go:334] "Generic (PLEG): container finished" podID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerID="9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1" exitCode=0 Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.272522 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.272531 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerDied","Data":"9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1"} Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.272630 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d7c106e-ad01-45ac-ad3e-992ba8dc500a","Type":"ContainerDied","Data":"052d9c7e77063996fc4e7b87e8b0a3b2be9bfb266fa1e789b9b8c54f4f0c88e0"} Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.272647 5045 scope.go:117] "RemoveContainer" containerID="979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.277324 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" event={"ID":"1076c0c8-e031-42c0-9978-5ca0d1cbd401","Type":"ContainerStarted","Data":"683de488209ef93056812460cd88d142e14734a941a920971364eeee621c4e17"} Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.294338 5045 scope.go:117] "RemoveContainer" containerID="76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.303680 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.311244 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.315001 5045 scope.go:117] "RemoveContainer" containerID="63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.334831 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:19 crc kubenswrapper[5045]: E1125 23:19:19.335925 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="ceilometer-central-agent" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.335952 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="ceilometer-central-agent" Nov 25 23:19:19 crc kubenswrapper[5045]: E1125 23:19:19.336004 5045 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="ceilometer-notification-agent" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.336016 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="ceilometer-notification-agent" Nov 25 23:19:19 crc kubenswrapper[5045]: E1125 23:19:19.336046 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="sg-core" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.336055 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="sg-core" Nov 25 23:19:19 crc kubenswrapper[5045]: E1125 23:19:19.336081 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="proxy-httpd" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.336090 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="proxy-httpd" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.341158 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="ceilometer-notification-agent" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.341241 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="sg-core" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.341259 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="ceilometer-central-agent" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.341276 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" containerName="proxy-httpd" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.346306 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.352680 5045 scope.go:117] "RemoveContainer" containerID="9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.361998 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.362676 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.368316 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.393607 5045 scope.go:117] "RemoveContainer" containerID="979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4" Nov 25 23:19:19 crc kubenswrapper[5045]: E1125 23:19:19.394091 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4\": container with ID starting with 979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4 not found: ID does not exist" containerID="979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.394125 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4"} err="failed to get container status \"979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4\": rpc error: code = NotFound desc = could not find container \"979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4\": container with ID starting with 979d8f02542f6e777a8348b9bd8048a4885bdae49062afcc3596b8b161ab2cd4 not found: ID does not exist" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.394146 5045 scope.go:117] "RemoveContainer" containerID="76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644" Nov 25 23:19:19 crc kubenswrapper[5045]: E1125 23:19:19.394418 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644\": container with ID starting with 76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644 not found: ID does not exist" containerID="76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.394441 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644"} err="failed to get container status \"76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644\": rpc error: code = NotFound desc = could not find container \"76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644\": container with ID starting with 76632e000fc29d5b8b8fe1917eae0a97c9fb4679e4288a1ca48bfe8009994644 not found: ID does not exist" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.394456 5045 scope.go:117] "RemoveContainer" containerID="63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d" Nov 25 23:19:19 crc kubenswrapper[5045]: E1125 23:19:19.394900 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d\": container with ID starting with 63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d not found: ID does not exist" containerID="63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.394998 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d"} err="failed to get container status \"63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d\": rpc error: code = NotFound desc = could not find container \"63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d\": container with ID starting with 63a28bc00f5c9ca6e615dc0ce7b3d1172313c0b8378ff8c7778b0d0e7e72850d not found: ID does not exist" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.395082 5045 scope.go:117] "RemoveContainer" containerID="9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1" Nov 25 23:19:19 crc kubenswrapper[5045]: E1125 23:19:19.395503 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1\": container with ID starting with 9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1 not found: ID does not exist" containerID="9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.395550 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1"} err="failed to get container status \"9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1\": rpc error: code = NotFound desc = could not find container \"9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1\": container with ID starting with 9d4097f4176e3d7850573cf6770097fdf6410f89d48e73fc11e65c40a92955c1 not found: ID does not exist" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.524057 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-log-httpd\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.524116 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwn5k\" (UniqueName: \"kubernetes.io/projected/c3533471-c586-4b49-8130-65d6100260d5-kube-api-access-jwn5k\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.524295 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-run-httpd\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.524510 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-scripts\") pod \"ceilometer-0\" (UID: 
\"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.524588 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.524650 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.524815 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-config-data\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.626971 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-run-httpd\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.627179 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-scripts\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.627402 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-run-httpd\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.628205 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.628273 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.628381 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-config-data\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.628444 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-log-httpd\") pod 
\"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.628478 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwn5k\" (UniqueName: \"kubernetes.io/projected/c3533471-c586-4b49-8130-65d6100260d5-kube-api-access-jwn5k\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.629152 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-log-httpd\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.632453 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.633127 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-config-data\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.633498 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.633817 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-scripts\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.666995 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwn5k\" (UniqueName: \"kubernetes.io/projected/c3533471-c586-4b49-8130-65d6100260d5-kube-api-access-jwn5k\") pod \"ceilometer-0\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " pod="openstack/ceilometer-0" Nov 25 23:19:19 crc kubenswrapper[5045]: I1125 23:19:19.692154 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:20 crc kubenswrapper[5045]: I1125 23:19:20.136047 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:20 crc kubenswrapper[5045]: W1125 23:19:20.146686 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3533471_c586_4b49_8130_65d6100260d5.slice/crio-a6e843b4c77f361653042c496e683c76fa73a533b1592d9cb85b267d81d35c16 WatchSource:0}: Error finding container a6e843b4c77f361653042c496e683c76fa73a533b1592d9cb85b267d81d35c16: Status 404 returned error can't find the container with id a6e843b4c77f361653042c496e683c76fa73a533b1592d9cb85b267d81d35c16 Nov 25 23:19:20 crc kubenswrapper[5045]: I1125 23:19:20.291815 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerStarted","Data":"a6e843b4c77f361653042c496e683c76fa73a533b1592d9cb85b267d81d35c16"} Nov 25 23:19:20 crc kubenswrapper[5045]: I1125 23:19:20.408092 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d7c106e-ad01-45ac-ad3e-992ba8dc500a" path="/var/lib/kubelet/pods/3d7c106e-ad01-45ac-ad3e-992ba8dc500a/volumes" Nov 25 23:19:21 crc kubenswrapper[5045]: I1125 23:19:21.144901 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:21 crc kubenswrapper[5045]: I1125 23:19:21.303095 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerStarted","Data":"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63"} Nov 25 23:19:22 crc kubenswrapper[5045]: I1125 23:19:22.312078 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerStarted","Data":"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad"} Nov 25 23:19:23 crc kubenswrapper[5045]: I1125 23:19:23.506193 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 23:19:32 crc kubenswrapper[5045]: I1125 23:19:32.418048 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerStarted","Data":"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f"} Nov 25 23:19:32 crc kubenswrapper[5045]: I1125 23:19:32.419788 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" event={"ID":"1076c0c8-e031-42c0-9978-5ca0d1cbd401","Type":"ContainerStarted","Data":"e2f5b5fa1a31c1ed4a0841060c620289e8c2ed72d328c8fe54223016ffe240d0"} Nov 25 23:19:32 crc kubenswrapper[5045]: I1125 23:19:32.446051 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" podStartSLOduration=2.214943591 podStartE2EDuration="14.446033311s" podCreationTimestamp="2025-11-25 23:19:18 +0000 UTC" firstStartedPulling="2025-11-25 23:19:19.148794709 +0000 UTC m=+1215.506453821" lastFinishedPulling="2025-11-25 23:19:31.379884409 +0000 UTC m=+1227.737543541" observedRunningTime="2025-11-25 23:19:32.445419443 +0000 UTC m=+1228.803078635" watchObservedRunningTime="2025-11-25 23:19:32.446033311 +0000 UTC m=+1228.803692433" Nov 25 23:19:34 crc kubenswrapper[5045]: I1125 23:19:34.443729 5045 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerStarted","Data":"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121"} Nov 25 23:19:34 crc kubenswrapper[5045]: I1125 23:19:34.444317 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 23:19:34 crc kubenswrapper[5045]: I1125 23:19:34.443998 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="sg-core" containerID="cri-o://a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f" gracePeriod=30 Nov 25 23:19:34 crc kubenswrapper[5045]: I1125 23:19:34.443957 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="ceilometer-central-agent" containerID="cri-o://19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63" gracePeriod=30 Nov 25 23:19:34 crc kubenswrapper[5045]: I1125 23:19:34.444062 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="ceilometer-notification-agent" containerID="cri-o://4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad" gracePeriod=30 Nov 25 23:19:34 crc kubenswrapper[5045]: I1125 23:19:34.444025 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="proxy-httpd" containerID="cri-o://cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121" gracePeriod=30 Nov 25 23:19:34 crc kubenswrapper[5045]: I1125 23:19:34.481431 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.553967809 podStartE2EDuration="15.481406778s" podCreationTimestamp="2025-11-25 23:19:19 +0000 UTC" firstStartedPulling="2025-11-25 23:19:20.149873116 +0000 UTC m=+1216.507532228" lastFinishedPulling="2025-11-25 23:19:33.077312065 +0000 UTC m=+1229.434971197" observedRunningTime="2025-11-25 23:19:34.473404655 +0000 UTC m=+1230.831063767" watchObservedRunningTime="2025-11-25 23:19:34.481406778 +0000 UTC m=+1230.839065890" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.320263 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.338502 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-sg-core-conf-yaml\") pod \"c3533471-c586-4b49-8130-65d6100260d5\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.338625 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-combined-ca-bundle\") pod \"c3533471-c586-4b49-8130-65d6100260d5\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.338658 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-log-httpd\") pod \"c3533471-c586-4b49-8130-65d6100260d5\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.338694 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-run-httpd\") pod \"c3533471-c586-4b49-8130-65d6100260d5\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.338735 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwn5k\" (UniqueName: \"kubernetes.io/projected/c3533471-c586-4b49-8130-65d6100260d5-kube-api-access-jwn5k\") pod \"c3533471-c586-4b49-8130-65d6100260d5\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.338854 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-scripts\") pod \"c3533471-c586-4b49-8130-65d6100260d5\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.338911 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-config-data\") pod \"c3533471-c586-4b49-8130-65d6100260d5\" (UID: \"c3533471-c586-4b49-8130-65d6100260d5\") " Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.339909 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c3533471-c586-4b49-8130-65d6100260d5" (UID: "c3533471-c586-4b49-8130-65d6100260d5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.340311 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c3533471-c586-4b49-8130-65d6100260d5" (UID: "c3533471-c586-4b49-8130-65d6100260d5"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.348802 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3533471-c586-4b49-8130-65d6100260d5-kube-api-access-jwn5k" (OuterVolumeSpecName: "kube-api-access-jwn5k") pod "c3533471-c586-4b49-8130-65d6100260d5" (UID: "c3533471-c586-4b49-8130-65d6100260d5"). InnerVolumeSpecName "kube-api-access-jwn5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.348837 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-scripts" (OuterVolumeSpecName: "scripts") pod "c3533471-c586-4b49-8130-65d6100260d5" (UID: "c3533471-c586-4b49-8130-65d6100260d5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.388427 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c3533471-c586-4b49-8130-65d6100260d5" (UID: "c3533471-c586-4b49-8130-65d6100260d5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.427409 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3533471-c586-4b49-8130-65d6100260d5" (UID: "c3533471-c586-4b49-8130-65d6100260d5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.440278 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.440310 5045 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.440320 5045 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c3533471-c586-4b49-8130-65d6100260d5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.440329 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwn5k\" (UniqueName: \"kubernetes.io/projected/c3533471-c586-4b49-8130-65d6100260d5-kube-api-access-jwn5k\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.440340 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.440348 5045 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454087 5045 generic.go:334] "Generic (PLEG): container finished" 
podID="c3533471-c586-4b49-8130-65d6100260d5" containerID="cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121" exitCode=0 Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454148 5045 generic.go:334] "Generic (PLEG): container finished" podID="c3533471-c586-4b49-8130-65d6100260d5" containerID="a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f" exitCode=2 Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454159 5045 generic.go:334] "Generic (PLEG): container finished" podID="c3533471-c586-4b49-8130-65d6100260d5" containerID="4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad" exitCode=0 Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454168 5045 generic.go:334] "Generic (PLEG): container finished" podID="c3533471-c586-4b49-8130-65d6100260d5" containerID="19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63" exitCode=0 Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454162 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454191 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerDied","Data":"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121"} Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454248 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerDied","Data":"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f"} Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454263 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerDied","Data":"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad"} Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454275 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerDied","Data":"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63"} Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454286 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c3533471-c586-4b49-8130-65d6100260d5","Type":"ContainerDied","Data":"a6e843b4c77f361653042c496e683c76fa73a533b1592d9cb85b267d81d35c16"} Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.454305 5045 scope.go:117] "RemoveContainer" containerID="cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.461559 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-config-data" (OuterVolumeSpecName: "config-data") pod "c3533471-c586-4b49-8130-65d6100260d5" (UID: "c3533471-c586-4b49-8130-65d6100260d5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.472315 5045 scope.go:117] "RemoveContainer" containerID="a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.490399 5045 scope.go:117] "RemoveContainer" containerID="4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.505743 5045 scope.go:117] "RemoveContainer" containerID="19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.523917 5045 scope.go:117] "RemoveContainer" containerID="cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121" Nov 25 23:19:35 crc kubenswrapper[5045]: E1125 23:19:35.524339 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": container with ID starting with cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121 not found: ID does not exist" containerID="cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.524372 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121"} err="failed to get container status \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": rpc error: code = NotFound desc = could not find container \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": container with ID starting with cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121 not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.524394 5045 scope.go:117] "RemoveContainer" containerID="a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f" Nov 25 23:19:35 crc kubenswrapper[5045]: E1125 23:19:35.524767 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": container with ID starting with a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f not found: ID does not exist" containerID="a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.524790 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f"} err="failed to get container status \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": rpc error: code = NotFound desc = could not find container \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": container with ID starting with a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.524803 5045 scope.go:117] "RemoveContainer" containerID="4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad" Nov 25 23:19:35 crc kubenswrapper[5045]: E1125 23:19:35.525384 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": container with ID starting with 
4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad not found: ID does not exist" containerID="4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.525410 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad"} err="failed to get container status \"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": rpc error: code = NotFound desc = could not find container \"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": container with ID starting with 4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.525427 5045 scope.go:117] "RemoveContainer" containerID="19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63" Nov 25 23:19:35 crc kubenswrapper[5045]: E1125 23:19:35.525686 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": container with ID starting with 19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63 not found: ID does not exist" containerID="19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.525747 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63"} err="failed to get container status \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": rpc error: code = NotFound desc = could not find container \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": container with ID starting with 19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63 not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.525765 5045 scope.go:117] "RemoveContainer" containerID="cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.526166 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121"} err="failed to get container status \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": rpc error: code = NotFound desc = could not find container \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": container with ID starting with cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121 not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.526186 5045 scope.go:117] "RemoveContainer" containerID="a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.526549 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f"} err="failed to get container status \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": rpc error: code = NotFound desc = could not find container \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": container with ID starting with a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f not found: ID does not exist" Nov 25 
23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.526570 5045 scope.go:117] "RemoveContainer" containerID="4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.526867 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad"} err="failed to get container status \"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": rpc error: code = NotFound desc = could not find container \"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": container with ID starting with 4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.526894 5045 scope.go:117] "RemoveContainer" containerID="19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.527265 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63"} err="failed to get container status \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": rpc error: code = NotFound desc = could not find container \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": container with ID starting with 19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63 not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.527291 5045 scope.go:117] "RemoveContainer" containerID="cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.528941 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121"} err="failed to get container status \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": rpc error: code = NotFound desc = could not find container \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": container with ID starting with cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121 not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.528984 5045 scope.go:117] "RemoveContainer" containerID="a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.529296 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f"} err="failed to get container status \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": rpc error: code = NotFound desc = could not find container \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": container with ID starting with a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.529341 5045 scope.go:117] "RemoveContainer" containerID="4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.529620 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad"} err="failed to get container status 
\"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": rpc error: code = NotFound desc = could not find container \"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": container with ID starting with 4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.529659 5045 scope.go:117] "RemoveContainer" containerID="19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.529933 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63"} err="failed to get container status \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": rpc error: code = NotFound desc = could not find container \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": container with ID starting with 19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63 not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.529957 5045 scope.go:117] "RemoveContainer" containerID="cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.530199 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121"} err="failed to get container status \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": rpc error: code = NotFound desc = could not find container \"cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121\": container with ID starting with cf9a04d67fcfc9f53d6fcc38801a03033a727a3883e471f7b3b479cb8e437121 not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.530217 5045 scope.go:117] "RemoveContainer" containerID="a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.530616 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f"} err="failed to get container status \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": rpc error: code = NotFound desc = could not find container \"a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f\": container with ID starting with a24328cc008fce4129907d9d3d8bf83db327f50ad3514a2c517f14ad38ab649f not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.530641 5045 scope.go:117] "RemoveContainer" containerID="4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.530913 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad"} err="failed to get container status \"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": rpc error: code = NotFound desc = could not find container \"4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad\": container with ID starting with 4aa489ee32093a91476a6f5b72e8f03177ee5fd2df8174d7235598981fbb81ad not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.530936 5045 scope.go:117] "RemoveContainer" 
containerID="19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.531185 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63"} err="failed to get container status \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": rpc error: code = NotFound desc = could not find container \"19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63\": container with ID starting with 19ebc55c0afa751936fba62211391d5b1d098bf4e15395f8c0c495865119af63 not found: ID does not exist" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.541418 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3533471-c586-4b49-8130-65d6100260d5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.818767 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.885508 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.897544 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:35 crc kubenswrapper[5045]: E1125 23:19:35.898018 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="ceilometer-central-agent" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.898041 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="ceilometer-central-agent" Nov 25 23:19:35 crc kubenswrapper[5045]: E1125 23:19:35.898056 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="proxy-httpd" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.898065 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="proxy-httpd" Nov 25 23:19:35 crc kubenswrapper[5045]: E1125 23:19:35.898090 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="ceilometer-notification-agent" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.898099 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="ceilometer-notification-agent" Nov 25 23:19:35 crc kubenswrapper[5045]: E1125 23:19:35.898118 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="sg-core" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.898127 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="sg-core" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.898313 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="proxy-httpd" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.898338 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="sg-core" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.898352 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3533471-c586-4b49-8130-65d6100260d5" 
containerName="ceilometer-notification-agent" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.898378 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3533471-c586-4b49-8130-65d6100260d5" containerName="ceilometer-central-agent" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.900337 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.903170 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.903596 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.906260 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.987389 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-run-httpd\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.987437 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-config-data\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.987552 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqlzp\" (UniqueName: \"kubernetes.io/projected/5d09e5d1-9221-4785-abe2-8763781bc338-kube-api-access-rqlzp\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.987593 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.987638 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.987677 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-log-httpd\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:35 crc kubenswrapper[5045]: I1125 23:19:35.987759 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-scripts\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: 
I1125 23:19:36.088739 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-run-httpd\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.088798 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-config-data\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.088843 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqlzp\" (UniqueName: \"kubernetes.io/projected/5d09e5d1-9221-4785-abe2-8763781bc338-kube-api-access-rqlzp\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.088866 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.088886 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.088924 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-log-httpd\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.088965 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-scripts\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.089766 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-run-httpd\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.090024 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-log-httpd\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.094923 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.096127 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-config-data\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.098285 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-scripts\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.103285 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.107055 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqlzp\" (UniqueName: \"kubernetes.io/projected/5d09e5d1-9221-4785-abe2-8763781bc338-kube-api-access-rqlzp\") pod \"ceilometer-0\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.221366 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.407651 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3533471-c586-4b49-8130-65d6100260d5" path="/var/lib/kubelet/pods/c3533471-c586-4b49-8130-65d6100260d5/volumes" Nov 25 23:19:36 crc kubenswrapper[5045]: W1125 23:19:36.659150 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d09e5d1_9221_4785_abe2_8763781bc338.slice/crio-cf4df5b13e6bd370890d25f923da887027e546267544d49bdec7296d6e13bcd5 WatchSource:0}: Error finding container cf4df5b13e6bd370890d25f923da887027e546267544d49bdec7296d6e13bcd5: Status 404 returned error can't find the container with id cf4df5b13e6bd370890d25f923da887027e546267544d49bdec7296d6e13bcd5 Nov 25 23:19:36 crc kubenswrapper[5045]: I1125 23:19:36.663083 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:19:37 crc kubenswrapper[5045]: I1125 23:19:37.483007 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerStarted","Data":"34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7"} Nov 25 23:19:37 crc kubenswrapper[5045]: I1125 23:19:37.483295 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerStarted","Data":"cf4df5b13e6bd370890d25f923da887027e546267544d49bdec7296d6e13bcd5"} Nov 25 23:19:38 crc kubenswrapper[5045]: I1125 23:19:38.492070 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerStarted","Data":"1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1"} Nov 25 23:19:39 crc kubenswrapper[5045]: I1125 23:19:39.504074 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerStarted","Data":"761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5"} Nov 25 23:19:41 crc kubenswrapper[5045]: I1125 23:19:41.534977 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerStarted","Data":"84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a"} Nov 25 23:19:41 crc kubenswrapper[5045]: I1125 23:19:41.535574 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 23:19:41 crc kubenswrapper[5045]: I1125 23:19:41.564984 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.606269288 podStartE2EDuration="6.564965653s" podCreationTimestamp="2025-11-25 23:19:35 +0000 UTC" firstStartedPulling="2025-11-25 23:19:36.661824259 +0000 UTC m=+1233.019483361" lastFinishedPulling="2025-11-25 23:19:40.620520584 +0000 UTC m=+1236.978179726" observedRunningTime="2025-11-25 23:19:41.55929324 +0000 UTC m=+1237.916952362" watchObservedRunningTime="2025-11-25 23:19:41.564965653 +0000 UTC m=+1237.922624765" Nov 25 23:19:43 crc kubenswrapper[5045]: I1125 23:19:43.556750 5045 generic.go:334] "Generic (PLEG): container finished" podID="1076c0c8-e031-42c0-9978-5ca0d1cbd401" containerID="e2f5b5fa1a31c1ed4a0841060c620289e8c2ed72d328c8fe54223016ffe240d0" exitCode=0 Nov 25 23:19:43 crc kubenswrapper[5045]: I1125 23:19:43.557009 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" event={"ID":"1076c0c8-e031-42c0-9978-5ca0d1cbd401","Type":"ContainerDied","Data":"e2f5b5fa1a31c1ed4a0841060c620289e8c2ed72d328c8fe54223016ffe240d0"} Nov 25 23:19:44 crc kubenswrapper[5045]: I1125 23:19:44.965762 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.068805 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-combined-ca-bundle\") pod \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.068997 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-config-data\") pod \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.069055 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-scripts\") pod \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.069116 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wx5m9\" (UniqueName: \"kubernetes.io/projected/1076c0c8-e031-42c0-9978-5ca0d1cbd401-kube-api-access-wx5m9\") pod \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\" (UID: \"1076c0c8-e031-42c0-9978-5ca0d1cbd401\") " Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.076699 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1076c0c8-e031-42c0-9978-5ca0d1cbd401-kube-api-access-wx5m9" (OuterVolumeSpecName: "kube-api-access-wx5m9") pod "1076c0c8-e031-42c0-9978-5ca0d1cbd401" (UID: "1076c0c8-e031-42c0-9978-5ca0d1cbd401"). InnerVolumeSpecName "kube-api-access-wx5m9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.080948 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-scripts" (OuterVolumeSpecName: "scripts") pod "1076c0c8-e031-42c0-9978-5ca0d1cbd401" (UID: "1076c0c8-e031-42c0-9978-5ca0d1cbd401"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.101250 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1076c0c8-e031-42c0-9978-5ca0d1cbd401" (UID: "1076c0c8-e031-42c0-9978-5ca0d1cbd401"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.101723 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-config-data" (OuterVolumeSpecName: "config-data") pod "1076c0c8-e031-42c0-9978-5ca0d1cbd401" (UID: "1076c0c8-e031-42c0-9978-5ca0d1cbd401"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.170622 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.170646 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.170656 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wx5m9\" (UniqueName: \"kubernetes.io/projected/1076c0c8-e031-42c0-9978-5ca0d1cbd401-kube-api-access-wx5m9\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.170665 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1076c0c8-e031-42c0-9978-5ca0d1cbd401-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.584103 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" event={"ID":"1076c0c8-e031-42c0-9978-5ca0d1cbd401","Type":"ContainerDied","Data":"683de488209ef93056812460cd88d142e14734a941a920971364eeee621c4e17"} Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.584473 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="683de488209ef93056812460cd88d142e14734a941a920971364eeee621c4e17" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.584238 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-gcz6n" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.728216 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 23:19:45 crc kubenswrapper[5045]: E1125 23:19:45.728859 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1076c0c8-e031-42c0-9978-5ca0d1cbd401" containerName="nova-cell0-conductor-db-sync" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.728889 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1076c0c8-e031-42c0-9978-5ca0d1cbd401" containerName="nova-cell0-conductor-db-sync" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.729198 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="1076c0c8-e031-42c0-9978-5ca0d1cbd401" containerName="nova-cell0-conductor-db-sync" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.730163 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.732610 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-ww4nf" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.736612 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.740271 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.780859 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.780971 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.781007 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pttnr\" (UniqueName: \"kubernetes.io/projected/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-kube-api-access-pttnr\") pod \"nova-cell0-conductor-0\" (UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.882966 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.883075 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.883102 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pttnr\" (UniqueName: \"kubernetes.io/projected/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-kube-api-access-pttnr\") pod \"nova-cell0-conductor-0\" (UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.888522 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.888835 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:45 crc kubenswrapper[5045]: I1125 23:19:45.910033 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pttnr\" (UniqueName: \"kubernetes.io/projected/98d79db2-7dcf-4b5a-8d2b-bd1a799b843f-kube-api-access-pttnr\") pod \"nova-cell0-conductor-0\" (UID: \"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:46 crc kubenswrapper[5045]: I1125 23:19:46.063630 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:46 crc kubenswrapper[5045]: I1125 23:19:46.513359 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 23:19:46 crc kubenswrapper[5045]: W1125 23:19:46.513972 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98d79db2_7dcf_4b5a_8d2b_bd1a799b843f.slice/crio-52251cc6fd907b13383f99acd811fb03d02415daffe75584d519f1dab488a4ac WatchSource:0}: Error finding container 52251cc6fd907b13383f99acd811fb03d02415daffe75584d519f1dab488a4ac: Status 404 returned error can't find the container with id 52251cc6fd907b13383f99acd811fb03d02415daffe75584d519f1dab488a4ac Nov 25 23:19:46 crc kubenswrapper[5045]: I1125 23:19:46.606603 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f","Type":"ContainerStarted","Data":"52251cc6fd907b13383f99acd811fb03d02415daffe75584d519f1dab488a4ac"} Nov 25 23:19:47 crc kubenswrapper[5045]: I1125 23:19:47.619538 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"98d79db2-7dcf-4b5a-8d2b-bd1a799b843f","Type":"ContainerStarted","Data":"336446a6c395e11ce1be8b362b4a45633e8064172a025a775379d38c88088992"} Nov 25 23:19:47 crc kubenswrapper[5045]: I1125 23:19:47.620541 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:47 crc kubenswrapper[5045]: I1125 23:19:47.644573 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.644554939 podStartE2EDuration="2.644554939s" podCreationTimestamp="2025-11-25 23:19:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:19:47.63405005 +0000 UTC m=+1243.991709182" watchObservedRunningTime="2025-11-25 23:19:47.644554939 +0000 UTC m=+1244.002214061" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.111232 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.657046 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-4psjq"] Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.658977 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.661854 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.663955 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.668601 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-4psjq"] Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.694577 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-config-data\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.694642 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-scripts\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.694683 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.694758 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdldh\" (UniqueName: \"kubernetes.io/projected/3327fc22-a987-47ad-a327-e5a7a2306f6e-kube-api-access-tdldh\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.796622 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-config-data\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.797017 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-scripts\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.797065 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.797122 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdldh\" (UniqueName: 
\"kubernetes.io/projected/3327fc22-a987-47ad-a327-e5a7a2306f6e-kube-api-access-tdldh\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.804055 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-scripts\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.810688 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-config-data\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.815113 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdldh\" (UniqueName: \"kubernetes.io/projected/3327fc22-a987-47ad-a327-e5a7a2306f6e-kube-api-access-tdldh\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.822088 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4psjq\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") " pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.847518 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.852688 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.854471 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.898266 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.898332 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-config-data\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.898395 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfj7k\" (UniqueName: \"kubernetes.io/projected/6f7dc6cc-20be-413d-9e0a-e5a14619f323-kube-api-access-hfj7k\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.898694 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f7dc6cc-20be-413d-9e0a-e5a14619f323-logs\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.924435 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.984582 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.985864 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.993152 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.998438 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-z5dhk"] Nov 25 23:19:51 crc kubenswrapper[5045]: I1125 23:19:51.999921 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.000653 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f7dc6cc-20be-413d-9e0a-e5a14619f323-logs\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.000687 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-config-data\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.000740 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dd606b7-843d-4880-9364-e4a7f629f3a6-logs\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.000768 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.000802 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-config-data\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.000831 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfj7k\" (UniqueName: \"kubernetes.io/projected/6f7dc6cc-20be-413d-9e0a-e5a14619f323-kube-api-access-hfj7k\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.000855 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.000888 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hds8g\" (UniqueName: \"kubernetes.io/projected/5dd606b7-843d-4880-9364-e4a7f629f3a6-kube-api-access-hds8g\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.001311 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f7dc6cc-20be-413d-9e0a-e5a14619f323-logs\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.004805 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4psjq" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.022418 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.027398 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-config-data\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.050730 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfj7k\" (UniqueName: \"kubernetes.io/projected/6f7dc6cc-20be-413d-9e0a-e5a14619f323-kube-api-access-hfj7k\") pod \"nova-metadata-0\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.050794 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.070374 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-z5dhk"] Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.100996 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.102068 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.102794 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.102857 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.102913 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.102937 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqlsb\" (UniqueName: \"kubernetes.io/projected/0b4bf700-b50f-412f-bfb4-cf401cd618be-kube-api-access-kqlsb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.102971 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hds8g\" (UniqueName: 
\"kubernetes.io/projected/5dd606b7-843d-4880-9364-e4a7f629f3a6-kube-api-access-hds8g\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.103007 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.103044 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-config-data\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.103060 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-config\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.103086 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dd606b7-843d-4880-9364-e4a7f629f3a6-logs\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.103921 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dd606b7-843d-4880-9364-e4a7f629f3a6-logs\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.108470 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.118913 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.121019 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-config-data\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.131295 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hds8g\" (UniqueName: \"kubernetes.io/projected/5dd606b7-843d-4880-9364-e4a7f629f3a6-kube-api-access-hds8g\") pod \"nova-api-0\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") " pod="openstack/nova-api-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.135640 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.184597 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:19:52 crc kubenswrapper[5045]: 
I1125 23:19:52.186017 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.187821 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.194303 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204572 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-config-data\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204616 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204635 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204669 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqlsb\" (UniqueName: \"kubernetes.io/projected/0b4bf700-b50f-412f-bfb4-cf401cd618be-kube-api-access-kqlsb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204739 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4nc8\" (UniqueName: \"kubernetes.io/projected/83215ee2-350f-4838-9973-6a8417a491d0-kube-api-access-c4nc8\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204785 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4m2r\" (UniqueName: \"kubernetes.io/projected/285f19f1-962e-4a44-bf63-50137f6aa140-kube-api-access-q4m2r\") pod \"nova-cell1-novncproxy-0\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204831 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204858 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204907 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-config\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204969 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.204991 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.205628 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.205628 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-config\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.206278 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.206800 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.218040 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.227875 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqlsb\" (UniqueName: \"kubernetes.io/projected/0b4bf700-b50f-412f-bfb4-cf401cd618be-kube-api-access-kqlsb\") pod \"dnsmasq-dns-8b8cf6657-z5dhk\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.308766 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.309817 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-config-data\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.309873 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.309938 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4nc8\" (UniqueName: \"kubernetes.io/projected/83215ee2-350f-4838-9973-6a8417a491d0-kube-api-access-c4nc8\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.309978 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4m2r\" (UniqueName: \"kubernetes.io/projected/285f19f1-962e-4a44-bf63-50137f6aa140-kube-api-access-q4m2r\") pod \"nova-cell1-novncproxy-0\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.310011 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.310085 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.314778 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-config-data\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.315670 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.315906 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.325341 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4nc8\" (UniqueName: \"kubernetes.io/projected/83215ee2-350f-4838-9973-6a8417a491d0-kube-api-access-c4nc8\") pod \"nova-scheduler-0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " pod="openstack/nova-scheduler-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.329049 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.361092 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4m2r\" (UniqueName: \"kubernetes.io/projected/285f19f1-962e-4a44-bf63-50137f6aa140-kube-api-access-q4m2r\") pod \"nova-cell1-novncproxy-0\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.445171 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.467228 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.522253 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.552065 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-4psjq"]
Nov 25 23:19:52 crc kubenswrapper[5045]: W1125 23:19:52.592662 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3327fc22_a987_47ad_a327_e5a7a2306f6e.slice/crio-730e5a04c33d622bb9f5b270a73f8f16684d0fbc943b1cfc4451c46b2044a301 WatchSource:0}: Error finding container 730e5a04c33d622bb9f5b270a73f8f16684d0fbc943b1cfc4451c46b2044a301: Status 404 returned error can't find the container with id 730e5a04c33d622bb9f5b270a73f8f16684d0fbc943b1cfc4451c46b2044a301
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.689550 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4psjq" event={"ID":"3327fc22-a987-47ad-a327-e5a7a2306f6e","Type":"ContainerStarted","Data":"730e5a04c33d622bb9f5b270a73f8f16684d0fbc943b1cfc4451c46b2044a301"}
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.724166 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.747628 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vd62p"]
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.749197 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.754844 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.755031 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.758880 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vd62p"]
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.792346 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-z5dhk"]
Nov 25 23:19:52 crc kubenswrapper[5045]: W1125 23:19:52.796745 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b4bf700_b50f_412f_bfb4_cf401cd618be.slice/crio-1b51d476e1c54cbf2f95260bd6d40ca3efad4f54fe1db13a26b99caf2772bfa0 WatchSource:0}: Error finding container 1b51d476e1c54cbf2f95260bd6d40ca3efad4f54fe1db13a26b99caf2772bfa0: Status 404 returned error can't find the container with id 1b51d476e1c54cbf2f95260bd6d40ca3efad4f54fe1db13a26b99caf2772bfa0
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.822304 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-config-data\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.822409 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-scripts\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.822440 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.822566 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvzdr\" (UniqueName: \"kubernetes.io/projected/6db602b1-d6b8-410c-bd2f-fad64474ac8f-kube-api-access-jvzdr\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: W1125 23:19:52.858051 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5dd606b7_843d_4880_9364_e4a7f629f3a6.slice/crio-89e344cdf19c69a6862cd233fb24487536f79492ec7c006a4ceda0229afb6488 WatchSource:0}: Error finding container 89e344cdf19c69a6862cd233fb24487536f79492ec7c006a4ceda0229afb6488: Status 404 returned error can't find the container with id 89e344cdf19c69a6862cd233fb24487536f79492ec7c006a4ceda0229afb6488
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.869252 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.925065 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-config-data\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.925176 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-scripts\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.925219 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.925254 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvzdr\" (UniqueName: \"kubernetes.io/projected/6db602b1-d6b8-410c-bd2f-fad64474ac8f-kube-api-access-jvzdr\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.930445 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-config-data\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.931010 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.932538 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-scripts\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:52 crc kubenswrapper[5045]: I1125 23:19:52.943697 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvzdr\" (UniqueName: \"kubernetes.io/projected/6db602b1-d6b8-410c-bd2f-fad64474ac8f-kube-api-access-jvzdr\") pod \"nova-cell1-conductor-db-sync-vd62p\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") " pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.066515 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.072112 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:19:53 crc kubenswrapper[5045]: W1125 23:19:53.074024 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod285f19f1_962e_4a44_bf63_50137f6aa140.slice/crio-5331934a7b92b17d89e69415def42b8c5c628c36fcd5fb8a21e5c82626a7b02a WatchSource:0}: Error finding container 5331934a7b92b17d89e69415def42b8c5c628c36fcd5fb8a21e5c82626a7b02a: Status 404 returned error can't find the container with id 5331934a7b92b17d89e69415def42b8c5c628c36fcd5fb8a21e5c82626a7b02a
Nov 25 23:19:53 crc kubenswrapper[5045]: W1125 23:19:53.140746 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice/crio-889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46 WatchSource:0}: Error finding container 889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46: Status 404 returned error can't find the container with id 889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.141871 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.535206 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vd62p"]
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.707000 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vd62p" event={"ID":"6db602b1-d6b8-410c-bd2f-fad64474ac8f","Type":"ContainerStarted","Data":"5aa4e843baaee23fd7ac32e7c2a694f5fa1e6cbe4298703060253fbf5b2a6479"}
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.709000 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4psjq" event={"ID":"3327fc22-a987-47ad-a327-e5a7a2306f6e","Type":"ContainerStarted","Data":"5f906f9aa6637b72df37cd3da3698e1f39ff90b438c626f71a25fb8725a00638"}
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.711647 5045 generic.go:334] "Generic (PLEG): container finished" podID="0b4bf700-b50f-412f-bfb4-cf401cd618be" containerID="56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139" exitCode=0
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.711751 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" event={"ID":"0b4bf700-b50f-412f-bfb4-cf401cd618be","Type":"ContainerDied","Data":"56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139"}
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.711788 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" event={"ID":"0b4bf700-b50f-412f-bfb4-cf401cd618be","Type":"ContainerStarted","Data":"1b51d476e1c54cbf2f95260bd6d40ca3efad4f54fe1db13a26b99caf2772bfa0"}
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.713379 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83215ee2-350f-4838-9973-6a8417a491d0","Type":"ContainerStarted","Data":"889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46"}
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.714348 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5dd606b7-843d-4880-9364-e4a7f629f3a6","Type":"ContainerStarted","Data":"89e344cdf19c69a6862cd233fb24487536f79492ec7c006a4ceda0229afb6488"}
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.719787 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6f7dc6cc-20be-413d-9e0a-e5a14619f323","Type":"ContainerStarted","Data":"8247596d446c5f462c52f02de3cb98de409d54e3fb5a35286731ff810c1e6f61"}
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.724137 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"285f19f1-962e-4a44-bf63-50137f6aa140","Type":"ContainerStarted","Data":"5331934a7b92b17d89e69415def42b8c5c628c36fcd5fb8a21e5c82626a7b02a"}
Nov 25 23:19:53 crc kubenswrapper[5045]: I1125 23:19:53.730736 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-4psjq" podStartSLOduration=2.730703855 podStartE2EDuration="2.730703855s" podCreationTimestamp="2025-11-25 23:19:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:19:53.722787654 +0000 UTC m=+1250.080446777" watchObservedRunningTime="2025-11-25 23:19:53.730703855 +0000 UTC m=+1250.088362967"
Nov 25 23:19:54 crc kubenswrapper[5045]: I1125 23:19:54.738252 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vd62p" event={"ID":"6db602b1-d6b8-410c-bd2f-fad64474ac8f","Type":"ContainerStarted","Data":"e6e9d3aac4bd2915e49caed05c7b9dde8a2c55d5aea94ba697da6d3fca729ffb"}
Nov 25 23:19:54 crc kubenswrapper[5045]: I1125 23:19:54.742159 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" event={"ID":"0b4bf700-b50f-412f-bfb4-cf401cd618be","Type":"ContainerStarted","Data":"e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535"}
Nov 25 23:19:54 crc kubenswrapper[5045]: I1125 23:19:54.752464 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-vd62p" podStartSLOduration=2.752446274 podStartE2EDuration="2.752446274s" podCreationTimestamp="2025-11-25 23:19:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:19:54.749405442 +0000 UTC m=+1251.107064564" watchObservedRunningTime="2025-11-25 23:19:54.752446274 +0000 UTC m=+1251.110105386"
Nov 25 23:19:54 crc kubenswrapper[5045]: I1125 23:19:54.793063 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" podStartSLOduration=3.793041419 podStartE2EDuration="3.793041419s" podCreationTimestamp="2025-11-25 23:19:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:19:54.774567737 +0000 UTC m=+1251.132226869" watchObservedRunningTime="2025-11-25 23:19:54.793041419 +0000 UTC m=+1251.150700531"
Nov 25 23:19:55 crc kubenswrapper[5045]: I1125 23:19:55.543030 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 23:19:55 crc kubenswrapper[5045]: I1125 23:19:55.566224 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 23:19:55 crc kubenswrapper[5045]: I1125 23:19:55.750862 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk"
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.760320 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6f7dc6cc-20be-413d-9e0a-e5a14619f323","Type":"ContainerStarted","Data":"4a3fd3c1df741d1243f675d495a6796d9edf95568fb448bc5db7b90cf70cce12"}
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.760705 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6f7dc6cc-20be-413d-9e0a-e5a14619f323","Type":"ContainerStarted","Data":"9370d113c6168fcea4b804b333e1a13ec2c470c3ff53d8ccd56ee648ed2e27da"}
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.760915 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerName="nova-metadata-log" containerID="cri-o://9370d113c6168fcea4b804b333e1a13ec2c470c3ff53d8ccd56ee648ed2e27da" gracePeriod=30
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.763504 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerName="nova-metadata-metadata" containerID="cri-o://4a3fd3c1df741d1243f675d495a6796d9edf95568fb448bc5db7b90cf70cce12" gracePeriod=30
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.768267 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"285f19f1-962e-4a44-bf63-50137f6aa140","Type":"ContainerStarted","Data":"21a5b2c079233b35a3de8f72476ff5e347adb7408b76fc842cef38ee8142eef9"}
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.768432 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="285f19f1-962e-4a44-bf63-50137f6aa140" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://21a5b2c079233b35a3de8f72476ff5e347adb7408b76fc842cef38ee8142eef9" gracePeriod=30
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.775324 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83215ee2-350f-4838-9973-6a8417a491d0","Type":"ContainerStarted","Data":"7f1b2f720930fb2da6f4444c4248afeae120e612fb5ec69d757963731d3928a6"}
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.785580 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5dd606b7-843d-4880-9364-e4a7f629f3a6","Type":"ContainerStarted","Data":"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2"}
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.785616 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5dd606b7-843d-4880-9364-e4a7f629f3a6","Type":"ContainerStarted","Data":"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c"}
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.808802 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.398414302 podStartE2EDuration="5.808785664s" podCreationTimestamp="2025-11-25 23:19:51 +0000 UTC" firstStartedPulling="2025-11-25 23:19:52.746391355 +0000 UTC m=+1249.104050467" lastFinishedPulling="2025-11-25 23:19:56.156762707 +0000 UTC m=+1252.514421829" observedRunningTime="2025-11-25 23:19:56.779470362 +0000 UTC m=+1253.137129494" watchObservedRunningTime="2025-11-25 23:19:56.808785664 +0000 UTC m=+1253.166444776"
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.814023 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.746154166 podStartE2EDuration="4.814000212s" podCreationTimestamp="2025-11-25 23:19:52 +0000 UTC" firstStartedPulling="2025-11-25 23:19:53.094437599 +0000 UTC m=+1249.452096711" lastFinishedPulling="2025-11-25 23:19:56.162283625 +0000 UTC m=+1252.519942757" observedRunningTime="2025-11-25 23:19:56.79714949 +0000 UTC m=+1253.154808602" watchObservedRunningTime="2025-11-25 23:19:56.814000212 +0000 UTC m=+1253.171659334"
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.827534 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.811759652 podStartE2EDuration="4.827497103s" podCreationTimestamp="2025-11-25 23:19:52 +0000 UTC" firstStartedPulling="2025-11-25 23:19:53.143042967 +0000 UTC m=+1249.500702079" lastFinishedPulling="2025-11-25 23:19:56.158780408 +0000 UTC m=+1252.516439530" observedRunningTime="2025-11-25 23:19:56.810914988 +0000 UTC m=+1253.168574110" watchObservedRunningTime="2025-11-25 23:19:56.827497103 +0000 UTC m=+1253.185156215"
Nov 25 23:19:56 crc kubenswrapper[5045]: I1125 23:19:56.835445 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.541525384 podStartE2EDuration="5.835426284s" podCreationTimestamp="2025-11-25 23:19:51 +0000 UTC" firstStartedPulling="2025-11-25 23:19:52.862858367 +0000 UTC m=+1249.220517479" lastFinishedPulling="2025-11-25 23:19:56.156759237 +0000 UTC m=+1252.514418379" observedRunningTime="2025-11-25 23:19:56.828021669 +0000 UTC m=+1253.185680801" watchObservedRunningTime="2025-11-25 23:19:56.835426284 +0000 UTC m=+1253.193085396"
Nov 25 23:19:57 crc kubenswrapper[5045]: I1125 23:19:57.219557 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 23:19:57 crc kubenswrapper[5045]: I1125 23:19:57.219907 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 23:19:57 crc kubenswrapper[5045]: I1125 23:19:57.468155 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:19:57 crc kubenswrapper[5045]: I1125 23:19:57.522878 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 23:19:57 crc kubenswrapper[5045]: I1125 23:19:57.795187 5045 generic.go:334] "Generic (PLEG): container finished" podID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerID="9370d113c6168fcea4b804b333e1a13ec2c470c3ff53d8ccd56ee648ed2e27da" exitCode=143
Nov 25 23:19:57 crc kubenswrapper[5045]: I1125 23:19:57.795966 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6f7dc6cc-20be-413d-9e0a-e5a14619f323","Type":"ContainerDied","Data":"9370d113c6168fcea4b804b333e1a13ec2c470c3ff53d8ccd56ee648ed2e27da"}
Nov 25 23:19:59 crc kubenswrapper[5045]: I1125 23:19:59.823525 5045 generic.go:334] "Generic (PLEG): container finished" podID="3327fc22-a987-47ad-a327-e5a7a2306f6e" containerID="5f906f9aa6637b72df37cd3da3698e1f39ff90b438c626f71a25fb8725a00638" exitCode=0
Nov 25 23:19:59 crc kubenswrapper[5045]: I1125 23:19:59.823663 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4psjq" event={"ID":"3327fc22-a987-47ad-a327-e5a7a2306f6e","Type":"ContainerDied","Data":"5f906f9aa6637b72df37cd3da3698e1f39ff90b438c626f71a25fb8725a00638"}
Nov 25 23:20:00 crc kubenswrapper[5045]: I1125 23:20:00.835750 5045 generic.go:334] "Generic (PLEG): container finished" podID="6db602b1-d6b8-410c-bd2f-fad64474ac8f" containerID="e6e9d3aac4bd2915e49caed05c7b9dde8a2c55d5aea94ba697da6d3fca729ffb" exitCode=0
Nov 25 23:20:00 crc kubenswrapper[5045]: I1125 23:20:00.835938 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vd62p" event={"ID":"6db602b1-d6b8-410c-bd2f-fad64474ac8f","Type":"ContainerDied","Data":"e6e9d3aac4bd2915e49caed05c7b9dde8a2c55d5aea94ba697da6d3fca729ffb"}
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.267562 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4psjq"
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.413902 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-scripts\") pod \"3327fc22-a987-47ad-a327-e5a7a2306f6e\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") "
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.414096 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-config-data\") pod \"3327fc22-a987-47ad-a327-e5a7a2306f6e\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") "
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.414194 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-combined-ca-bundle\") pod \"3327fc22-a987-47ad-a327-e5a7a2306f6e\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") "
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.414398 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdldh\" (UniqueName: \"kubernetes.io/projected/3327fc22-a987-47ad-a327-e5a7a2306f6e-kube-api-access-tdldh\") pod \"3327fc22-a987-47ad-a327-e5a7a2306f6e\" (UID: \"3327fc22-a987-47ad-a327-e5a7a2306f6e\") "
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.422034 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3327fc22-a987-47ad-a327-e5a7a2306f6e-kube-api-access-tdldh" (OuterVolumeSpecName: "kube-api-access-tdldh") pod "3327fc22-a987-47ad-a327-e5a7a2306f6e" (UID: "3327fc22-a987-47ad-a327-e5a7a2306f6e"). InnerVolumeSpecName "kube-api-access-tdldh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.423012 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-scripts" (OuterVolumeSpecName: "scripts") pod "3327fc22-a987-47ad-a327-e5a7a2306f6e" (UID: "3327fc22-a987-47ad-a327-e5a7a2306f6e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.466500 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-config-data" (OuterVolumeSpecName: "config-data") pod "3327fc22-a987-47ad-a327-e5a7a2306f6e" (UID: "3327fc22-a987-47ad-a327-e5a7a2306f6e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.468219 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3327fc22-a987-47ad-a327-e5a7a2306f6e" (UID: "3327fc22-a987-47ad-a327-e5a7a2306f6e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.517417 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.517479 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.517506 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3327fc22-a987-47ad-a327-e5a7a2306f6e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.517536 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdldh\" (UniqueName: \"kubernetes.io/projected/3327fc22-a987-47ad-a327-e5a7a2306f6e-kube-api-access-tdldh\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.851611 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4psjq" event={"ID":"3327fc22-a987-47ad-a327-e5a7a2306f6e","Type":"ContainerDied","Data":"730e5a04c33d622bb9f5b270a73f8f16684d0fbc943b1cfc4451c46b2044a301"}
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.851913 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="730e5a04c33d622bb9f5b270a73f8f16684d0fbc943b1cfc4451c46b2044a301"
Nov 25 23:20:01 crc kubenswrapper[5045]: I1125 23:20:01.851781 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4psjq"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.051571 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.052503 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerName="nova-api-log" containerID="cri-o://f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c" gracePeriod=30
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.052664 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerName="nova-api-api" containerID="cri-o://826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2" gracePeriod=30
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.069835 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.070010 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="83215ee2-350f-4838-9973-6a8417a491d0" containerName="nova-scheduler-scheduler" containerID="cri-o://7f1b2f720930fb2da6f4444c4248afeae120e612fb5ec69d757963731d3928a6" gracePeriod=30
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.342619 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.433763 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-config-data\") pod \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") "
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.433820 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvzdr\" (UniqueName: \"kubernetes.io/projected/6db602b1-d6b8-410c-bd2f-fad64474ac8f-kube-api-access-jvzdr\") pod \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") "
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.434059 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-scripts\") pod \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") "
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.434166 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-combined-ca-bundle\") pod \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\" (UID: \"6db602b1-d6b8-410c-bd2f-fad64474ac8f\") "
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.439680 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-scripts" (OuterVolumeSpecName: "scripts") pod "6db602b1-d6b8-410c-bd2f-fad64474ac8f" (UID: "6db602b1-d6b8-410c-bd2f-fad64474ac8f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.440220 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6db602b1-d6b8-410c-bd2f-fad64474ac8f-kube-api-access-jvzdr" (OuterVolumeSpecName: "kube-api-access-jvzdr") pod "6db602b1-d6b8-410c-bd2f-fad64474ac8f" (UID: "6db602b1-d6b8-410c-bd2f-fad64474ac8f"). InnerVolumeSpecName "kube-api-access-jvzdr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.447916 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.487960 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-config-data" (OuterVolumeSpecName: "config-data") pod "6db602b1-d6b8-410c-bd2f-fad64474ac8f" (UID: "6db602b1-d6b8-410c-bd2f-fad64474ac8f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.518821 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-dnvz9"]
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.520028 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" podUID="bc3d9a68-5746-4875-b808-3b335918a6f4" containerName="dnsmasq-dns" containerID="cri-o://f6e511838c41c01c7318b486d5c136d23c95fd41fc99487bfcebd1ca2d200c00" gracePeriod=10
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.531948 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6db602b1-d6b8-410c-bd2f-fad64474ac8f" (UID: "6db602b1-d6b8-410c-bd2f-fad64474ac8f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.537906 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.538021 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.538090 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6db602b1-d6b8-410c-bd2f-fad64474ac8f-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.538152 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvzdr\" (UniqueName: \"kubernetes.io/projected/6db602b1-d6b8-410c-bd2f-fad64474ac8f-kube-api-access-jvzdr\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.629997 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.741161 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hds8g\" (UniqueName: \"kubernetes.io/projected/5dd606b7-843d-4880-9364-e4a7f629f3a6-kube-api-access-hds8g\") pod \"5dd606b7-843d-4880-9364-e4a7f629f3a6\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") "
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.741209 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-config-data\") pod \"5dd606b7-843d-4880-9364-e4a7f629f3a6\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") "
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.741257 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-combined-ca-bundle\") pod \"5dd606b7-843d-4880-9364-e4a7f629f3a6\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") "
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.741350 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dd606b7-843d-4880-9364-e4a7f629f3a6-logs\") pod \"5dd606b7-843d-4880-9364-e4a7f629f3a6\" (UID: \"5dd606b7-843d-4880-9364-e4a7f629f3a6\") "
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.741926 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dd606b7-843d-4880-9364-e4a7f629f3a6-logs" (OuterVolumeSpecName: "logs") pod "5dd606b7-843d-4880-9364-e4a7f629f3a6" (UID: "5dd606b7-843d-4880-9364-e4a7f629f3a6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.745536 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dd606b7-843d-4880-9364-e4a7f629f3a6-kube-api-access-hds8g" (OuterVolumeSpecName: "kube-api-access-hds8g") pod "5dd606b7-843d-4880-9364-e4a7f629f3a6" (UID: "5dd606b7-843d-4880-9364-e4a7f629f3a6"). InnerVolumeSpecName "kube-api-access-hds8g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.769205 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-config-data" (OuterVolumeSpecName: "config-data") pod "5dd606b7-843d-4880-9364-e4a7f629f3a6" (UID: "5dd606b7-843d-4880-9364-e4a7f629f3a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.777145 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5dd606b7-843d-4880-9364-e4a7f629f3a6" (UID: "5dd606b7-843d-4880-9364-e4a7f629f3a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.843158 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hds8g\" (UniqueName: \"kubernetes.io/projected/5dd606b7-843d-4880-9364-e4a7f629f3a6-kube-api-access-hds8g\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.843197 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.843207 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd606b7-843d-4880-9364-e4a7f629f3a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.843216 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dd606b7-843d-4880-9364-e4a7f629f3a6-logs\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.872377 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc3d9a68-5746-4875-b808-3b335918a6f4" containerID="f6e511838c41c01c7318b486d5c136d23c95fd41fc99487bfcebd1ca2d200c00" exitCode=0
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.873088 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" event={"ID":"bc3d9a68-5746-4875-b808-3b335918a6f4","Type":"ContainerDied","Data":"f6e511838c41c01c7318b486d5c136d23c95fd41fc99487bfcebd1ca2d200c00"}
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.879905 5045 generic.go:334] "Generic (PLEG): container finished" podID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerID="826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2" exitCode=0
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.880090 5045 generic.go:334] "Generic (PLEG): container finished" podID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerID="f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c" exitCode=143
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.880355 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.880403 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5dd606b7-843d-4880-9364-e4a7f629f3a6","Type":"ContainerDied","Data":"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2"}
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.880516 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5dd606b7-843d-4880-9364-e4a7f629f3a6","Type":"ContainerDied","Data":"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c"}
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.880535 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5dd606b7-843d-4880-9364-e4a7f629f3a6","Type":"ContainerDied","Data":"89e344cdf19c69a6862cd233fb24487536f79492ec7c006a4ceda0229afb6488"}
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.880566 5045 scope.go:117] "RemoveContainer" containerID="826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.883936 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vd62p" event={"ID":"6db602b1-d6b8-410c-bd2f-fad64474ac8f","Type":"ContainerDied","Data":"5aa4e843baaee23fd7ac32e7c2a694f5fa1e6cbe4298703060253fbf5b2a6479"}
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.883990 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5aa4e843baaee23fd7ac32e7c2a694f5fa1e6cbe4298703060253fbf5b2a6479"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.884184 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vd62p"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.915376 5045 scope.go:117] "RemoveContainer" containerID="f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.937910 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.943791 5045 scope.go:117] "RemoveContainer" containerID="826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2"
Nov 25 23:20:02 crc kubenswrapper[5045]: E1125 23:20:02.945705 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2\": container with ID starting with 826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2 not found: ID does not exist" containerID="826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.945765 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2"} err="failed to get container status \"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2\": rpc error: code = NotFound desc = could not find container \"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2\": container with ID starting with 826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2 not found: ID does not exist"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.945797 5045 scope.go:117] "RemoveContainer" containerID="f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c"
Nov 25 23:20:02 crc kubenswrapper[5045]: E1125 23:20:02.946176 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c\": container with ID starting with f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c not found: ID does not exist" containerID="f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.946216 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c"} err="failed to get container status \"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c\": rpc error: code = NotFound desc = could not find container \"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c\": container with ID starting with f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c not found: ID does not exist"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.946236 5045 scope.go:117] "RemoveContainer" containerID="826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.946491 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2"} err="failed to get container status \"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2\": rpc error: code = NotFound desc = could not find container \"826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2\": container with ID starting with 826048025d706a85f58db3c8b82e65acdbceb51badf59797e7bd4bb616e3e5c2 not found: ID does not exist"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.946513 5045 scope.go:117] "RemoveContainer" containerID="f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.946802 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c"} err="failed to get container status \"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c\": rpc error: code = NotFound desc = could not find container \"f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c\": container with ID starting with f0a49a367bfecd39d24aff6f1d05fef5696037f18ac55553345a87900ca40f9c not found: ID does not exist"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.960354 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.977277 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.987856 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 25 23:20:02 crc kubenswrapper[5045]: E1125 23:20:02.988206 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerName="nova-api-log"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988223 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerName="nova-api-log"
Nov 25 23:20:02 crc kubenswrapper[5045]: E1125 23:20:02.988249 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6db602b1-d6b8-410c-bd2f-fad64474ac8f" containerName="nova-cell1-conductor-db-sync"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988256 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6db602b1-d6b8-410c-bd2f-fad64474ac8f" containerName="nova-cell1-conductor-db-sync"
Nov 25 23:20:02 crc kubenswrapper[5045]: E1125 23:20:02.988272 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerName="nova-api-api"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988278 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerName="nova-api-api"
Nov 25 23:20:02 crc kubenswrapper[5045]: E1125 23:20:02.988293 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3327fc22-a987-47ad-a327-e5a7a2306f6e" containerName="nova-manage"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988299 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3327fc22-a987-47ad-a327-e5a7a2306f6e" containerName="nova-manage"
Nov 25 23:20:02 crc kubenswrapper[5045]: E1125 23:20:02.988310 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc3d9a68-5746-4875-b808-3b335918a6f4" containerName="dnsmasq-dns"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988316 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc3d9a68-5746-4875-b808-3b335918a6f4" containerName="dnsmasq-dns"
Nov 25 23:20:02 crc kubenswrapper[5045]: E1125 23:20:02.988329 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc3d9a68-5746-4875-b808-3b335918a6f4" containerName="init"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988338 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc3d9a68-5746-4875-b808-3b335918a6f4" containerName="init"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988509 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerName="nova-api-api"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988532 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" containerName="nova-api-log"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988541 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6db602b1-d6b8-410c-bd2f-fad64474ac8f" containerName="nova-cell1-conductor-db-sync"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988550 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3327fc22-a987-47ad-a327-e5a7a2306f6e" containerName="nova-manage"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.988559 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc3d9a68-5746-4875-b808-3b335918a6f4" containerName="dnsmasq-dns"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.989193 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.995188 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:02 crc kubenswrapper[5045]: I1125 23:20:02.996820 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.002221 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.013132 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.015591 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.036447 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.047570 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-sb\") pod \"bc3d9a68-5746-4875-b808-3b335918a6f4\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") "
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.047671 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-nb\") pod \"bc3d9a68-5746-4875-b808-3b335918a6f4\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") "
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.047775 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-config\") pod \"bc3d9a68-5746-4875-b808-3b335918a6f4\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") "
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.047840 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kprsj\" (UniqueName: \"kubernetes.io/projected/bc3d9a68-5746-4875-b808-3b335918a6f4-kube-api-access-kprsj\") pod \"bc3d9a68-5746-4875-b808-3b335918a6f4\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") "
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.047944 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-dns-svc\") pod \"bc3d9a68-5746-4875-b808-3b335918a6f4\" (UID: \"bc3d9a68-5746-4875-b808-3b335918a6f4\") "
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.048335 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf45160d-7c2c-4d8c-81da-db68ed300d2d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.048378 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q29bv\" (UniqueName: \"kubernetes.io/projected/cf45160d-7c2c-4d8c-81da-db68ed300d2d-kube-api-access-q29bv\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.048474 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf45160d-7c2c-4d8c-81da-db68ed300d2d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.054521 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc3d9a68-5746-4875-b808-3b335918a6f4-kube-api-access-kprsj" (OuterVolumeSpecName: "kube-api-access-kprsj") pod "bc3d9a68-5746-4875-b808-3b335918a6f4" (UID: "bc3d9a68-5746-4875-b808-3b335918a6f4"). InnerVolumeSpecName "kube-api-access-kprsj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.103393 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bc3d9a68-5746-4875-b808-3b335918a6f4" (UID: "bc3d9a68-5746-4875-b808-3b335918a6f4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.104783 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bc3d9a68-5746-4875-b808-3b335918a6f4" (UID: "bc3d9a68-5746-4875-b808-3b335918a6f4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.108273 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bc3d9a68-5746-4875-b808-3b335918a6f4" (UID: "bc3d9a68-5746-4875-b808-3b335918a6f4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.114028 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-config" (OuterVolumeSpecName: "config") pod "bc3d9a68-5746-4875-b808-3b335918a6f4" (UID: "bc3d9a68-5746-4875-b808-3b335918a6f4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.149561 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.149619 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42q8r\" (UniqueName: \"kubernetes.io/projected/a9163334-4411-4a03-9d40-8a86305dc8ee-kube-api-access-42q8r\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.149644 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf45160d-7c2c-4d8c-81da-db68ed300d2d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.149791 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9163334-4411-4a03-9d40-8a86305dc8ee-logs\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.149951 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-config-data\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.150077 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf45160d-7c2c-4d8c-81da-db68ed300d2d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.150124 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q29bv\" (UniqueName: \"kubernetes.io/projected/cf45160d-7c2c-4d8c-81da-db68ed300d2d-kube-api-access-q29bv\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.150212 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.150229 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.150243 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.150253 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc3d9a68-5746-4875-b808-3b335918a6f4-config\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.150262 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kprsj\" (UniqueName: \"kubernetes.io/projected/bc3d9a68-5746-4875-b808-3b335918a6f4-kube-api-access-kprsj\") on node \"crc\" DevicePath \"\""
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.153309 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf45160d-7c2c-4d8c-81da-db68ed300d2d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.153930 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf45160d-7c2c-4d8c-81da-db68ed300d2d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.165202 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q29bv\" (UniqueName: \"kubernetes.io/projected/cf45160d-7c2c-4d8c-81da-db68ed300d2d-kube-api-access-q29bv\") pod \"nova-cell1-conductor-0\" (UID: \"cf45160d-7c2c-4d8c-81da-db68ed300d2d\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.251429 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9163334-4411-4a03-9d40-8a86305dc8ee-logs\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.251528 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-config-data\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.251611 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.251643 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42q8r\" (UniqueName: \"kubernetes.io/projected/a9163334-4411-4a03-9d40-8a86305dc8ee-kube-api-access-42q8r\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.252046 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9163334-4411-4a03-9d40-8a86305dc8ee-logs\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0"
Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.255543 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-config-data\") pod
\"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0" Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.258440 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0" Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.269081 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42q8r\" (UniqueName: \"kubernetes.io/projected/a9163334-4411-4a03-9d40-8a86305dc8ee-kube-api-access-42q8r\") pod \"nova-api-0\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " pod="openstack/nova-api-0" Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.356475 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.356791 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.866274 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 23:20:03 crc kubenswrapper[5045]: W1125 23:20:03.868864 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9163334_4411_4a03_9d40_8a86305dc8ee.slice/crio-4e30ae40e501f2e886341dd9f780bc3fff12284bbcf5e128c3136b6dd7dd0319 WatchSource:0}: Error finding container 4e30ae40e501f2e886341dd9f780bc3fff12284bbcf5e128c3136b6dd7dd0319: Status 404 returned error can't find the container with id 4e30ae40e501f2e886341dd9f780bc3fff12284bbcf5e128c3136b6dd7dd0319 Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.885496 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.902337 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" event={"ID":"bc3d9a68-5746-4875-b808-3b335918a6f4","Type":"ContainerDied","Data":"66724663fd3a52bbd8e950631f5ffaf38422441fffcf158e7ae869fa504bd199"} Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.902355 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-dnvz9" Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.902405 5045 scope.go:117] "RemoveContainer" containerID="f6e511838c41c01c7318b486d5c136d23c95fd41fc99487bfcebd1ca2d200c00" Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.907805 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9163334-4411-4a03-9d40-8a86305dc8ee","Type":"ContainerStarted","Data":"4e30ae40e501f2e886341dd9f780bc3fff12284bbcf5e128c3136b6dd7dd0319"} Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.911324 5045 generic.go:334] "Generic (PLEG): container finished" podID="83215ee2-350f-4838-9973-6a8417a491d0" containerID="7f1b2f720930fb2da6f4444c4248afeae120e612fb5ec69d757963731d3928a6" exitCode=0 Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.911407 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83215ee2-350f-4838-9973-6a8417a491d0","Type":"ContainerDied","Data":"7f1b2f720930fb2da6f4444c4248afeae120e612fb5ec69d757963731d3928a6"} Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.912375 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"cf45160d-7c2c-4d8c-81da-db68ed300d2d","Type":"ContainerStarted","Data":"29e0e28cdbea9fa813f510f577afd588dba7b9a6289ac324c9f6b2e540426ad0"} Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.933527 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-dnvz9"] Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.938372 5045 scope.go:117] "RemoveContainer" containerID="c3fa77edafac5664d10858f50251d823ff6cd27d7e7d23d5a270e66f6fe5fe29" Nov 25 23:20:03 crc kubenswrapper[5045]: I1125 23:20:03.942983 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-dnvz9"] Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.085633 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.168494 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4nc8\" (UniqueName: \"kubernetes.io/projected/83215ee2-350f-4838-9973-6a8417a491d0-kube-api-access-c4nc8\") pod \"83215ee2-350f-4838-9973-6a8417a491d0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.168625 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-config-data\") pod \"83215ee2-350f-4838-9973-6a8417a491d0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.168784 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-combined-ca-bundle\") pod \"83215ee2-350f-4838-9973-6a8417a491d0\" (UID: \"83215ee2-350f-4838-9973-6a8417a491d0\") " Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.172750 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83215ee2-350f-4838-9973-6a8417a491d0-kube-api-access-c4nc8" (OuterVolumeSpecName: "kube-api-access-c4nc8") pod "83215ee2-350f-4838-9973-6a8417a491d0" (UID: "83215ee2-350f-4838-9973-6a8417a491d0"). 
InnerVolumeSpecName "kube-api-access-c4nc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.196149 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-config-data" (OuterVolumeSpecName: "config-data") pod "83215ee2-350f-4838-9973-6a8417a491d0" (UID: "83215ee2-350f-4838-9973-6a8417a491d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.199129 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83215ee2-350f-4838-9973-6a8417a491d0" (UID: "83215ee2-350f-4838-9973-6a8417a491d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.270383 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.270417 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4nc8\" (UniqueName: \"kubernetes.io/projected/83215ee2-350f-4838-9973-6a8417a491d0-kube-api-access-c4nc8\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.270427 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83215ee2-350f-4838-9973-6a8417a491d0-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.425614 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dd606b7-843d-4880-9364-e4a7f629f3a6" path="/var/lib/kubelet/pods/5dd606b7-843d-4880-9364-e4a7f629f3a6/volumes" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.426959 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc3d9a68-5746-4875-b808-3b335918a6f4" path="/var/lib/kubelet/pods/bc3d9a68-5746-4875-b808-3b335918a6f4/volumes" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.930246 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9163334-4411-4a03-9d40-8a86305dc8ee","Type":"ContainerStarted","Data":"c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3"} Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.930675 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9163334-4411-4a03-9d40-8a86305dc8ee","Type":"ContainerStarted","Data":"e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780"} Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.933750 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83215ee2-350f-4838-9973-6a8417a491d0","Type":"ContainerDied","Data":"889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46"} Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.933843 5045 scope.go:117] "RemoveContainer" containerID="7f1b2f720930fb2da6f4444c4248afeae120e612fb5ec69d757963731d3928a6" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.934097 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.942019 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"cf45160d-7c2c-4d8c-81da-db68ed300d2d","Type":"ContainerStarted","Data":"0a7ab2a68083db6f3fbb935c87bb467c6bb419d99d2ffe4c5d69be23dc26386b"} Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.942637 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 25 23:20:04 crc kubenswrapper[5045]: I1125 23:20:04.982144 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.982117546 podStartE2EDuration="2.982117546s" podCreationTimestamp="2025-11-25 23:20:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:04.958006543 +0000 UTC m=+1261.315665695" watchObservedRunningTime="2025-11-25 23:20:04.982117546 +0000 UTC m=+1261.339776688" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.016749 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.032126 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.040449 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:05 crc kubenswrapper[5045]: E1125 23:20:05.041020 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83215ee2-350f-4838-9973-6a8417a491d0" containerName="nova-scheduler-scheduler" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.041039 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="83215ee2-350f-4838-9973-6a8417a491d0" containerName="nova-scheduler-scheduler" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.041256 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="83215ee2-350f-4838-9973-6a8417a491d0" containerName="nova-scheduler-scheduler" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.042631 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.045268 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.045313 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.045295127 podStartE2EDuration="3.045295127s" podCreationTimestamp="2025-11-25 23:20:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:05.009419936 +0000 UTC m=+1261.367079068" watchObservedRunningTime="2025-11-25 23:20:05.045295127 +0000 UTC m=+1261.402954239" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.058990 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.193654 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.193864 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-config-data\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.194062 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqkf7\" (UniqueName: \"kubernetes.io/projected/52ab23f2-ab24-405e-8a1c-cdac875e433b-kube-api-access-pqkf7\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.295512 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-config-data\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.295833 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqkf7\" (UniqueName: \"kubernetes.io/projected/52ab23f2-ab24-405e-8a1c-cdac875e433b-kube-api-access-pqkf7\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.296360 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.301187 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " 
pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.303214 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-config-data\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.324097 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqkf7\" (UniqueName: \"kubernetes.io/projected/52ab23f2-ab24-405e-8a1c-cdac875e433b-kube-api-access-pqkf7\") pod \"nova-scheduler-0\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.366863 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.847172 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:05 crc kubenswrapper[5045]: I1125 23:20:05.952991 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"52ab23f2-ab24-405e-8a1c-cdac875e433b","Type":"ContainerStarted","Data":"2e79cd3e45219672233acce2d457e0710c5bd3e8dd5de8ed385bcb50e77cada7"} Nov 25 23:20:06 crc kubenswrapper[5045]: I1125 23:20:06.227158 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 23:20:06 crc kubenswrapper[5045]: I1125 23:20:06.408394 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83215ee2-350f-4838-9973-6a8417a491d0" path="/var/lib/kubelet/pods/83215ee2-350f-4838-9973-6a8417a491d0/volumes" Nov 25 23:20:07 crc kubenswrapper[5045]: I1125 23:20:07.011933 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"52ab23f2-ab24-405e-8a1c-cdac875e433b","Type":"ContainerStarted","Data":"41f58a026e5fed932a73b18d41697680d94490d3152810196ebae7d402af2b47"} Nov 25 23:20:07 crc kubenswrapper[5045]: I1125 23:20:07.043663 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.043636583 podStartE2EDuration="3.043636583s" podCreationTimestamp="2025-11-25 23:20:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:07.036536947 +0000 UTC m=+1263.394196099" watchObservedRunningTime="2025-11-25 23:20:07.043636583 +0000 UTC m=+1263.401295705" Nov 25 23:20:08 crc kubenswrapper[5045]: I1125 23:20:08.856555 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:20:08 crc kubenswrapper[5045]: I1125 23:20:08.858073 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="cf25e150-e63e-4987-89ce-6e1419e00e88" containerName="kube-state-metrics" containerID="cri-o://65339fe1e0a8b29264bb776769c1925a8ecb345e0e20c66f76f46626b28194c4" gracePeriod=30 Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.001641 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="cf25e150-e63e-4987-89ce-6e1419e00e88" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.104:8081/readyz\": dial tcp 10.217.0.104:8081: connect: connection refused" Nov 25 23:20:09 
crc kubenswrapper[5045]: I1125 23:20:09.046058 5045 generic.go:334] "Generic (PLEG): container finished" podID="cf25e150-e63e-4987-89ce-6e1419e00e88" containerID="65339fe1e0a8b29264bb776769c1925a8ecb345e0e20c66f76f46626b28194c4" exitCode=2 Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.046102 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cf25e150-e63e-4987-89ce-6e1419e00e88","Type":"ContainerDied","Data":"65339fe1e0a8b29264bb776769c1925a8ecb345e0e20c66f76f46626b28194c4"} Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.378693 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.487998 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfpvf\" (UniqueName: \"kubernetes.io/projected/cf25e150-e63e-4987-89ce-6e1419e00e88-kube-api-access-gfpvf\") pod \"cf25e150-e63e-4987-89ce-6e1419e00e88\" (UID: \"cf25e150-e63e-4987-89ce-6e1419e00e88\") " Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.494927 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf25e150-e63e-4987-89ce-6e1419e00e88-kube-api-access-gfpvf" (OuterVolumeSpecName: "kube-api-access-gfpvf") pod "cf25e150-e63e-4987-89ce-6e1419e00e88" (UID: "cf25e150-e63e-4987-89ce-6e1419e00e88"). InnerVolumeSpecName "kube-api-access-gfpvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.590619 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfpvf\" (UniqueName: \"kubernetes.io/projected/cf25e150-e63e-4987-89ce-6e1419e00e88-kube-api-access-gfpvf\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.899336 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.900971 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="ceilometer-central-agent" containerID="cri-o://34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7" gracePeriod=30 Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.901082 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="sg-core" containerID="cri-o://761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5" gracePeriod=30 Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.901145 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="proxy-httpd" containerID="cri-o://84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a" gracePeriod=30 Nov 25 23:20:09 crc kubenswrapper[5045]: I1125 23:20:09.901209 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="ceilometer-notification-agent" containerID="cri-o://1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1" gracePeriod=30 Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.056491 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"cf25e150-e63e-4987-89ce-6e1419e00e88","Type":"ContainerDied","Data":"5840ac0803474125244042f84d64dad2d1362f190734b3cd2b2d4340d7128b35"} Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.056548 5045 scope.go:117] "RemoveContainer" containerID="65339fe1e0a8b29264bb776769c1925a8ecb345e0e20c66f76f46626b28194c4" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.056681 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.064169 5045 generic.go:334] "Generic (PLEG): container finished" podID="5d09e5d1-9221-4785-abe2-8763781bc338" containerID="761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5" exitCode=2 Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.064227 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerDied","Data":"761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5"} Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.104449 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.128140 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.135872 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:20:10 crc kubenswrapper[5045]: E1125 23:20:10.136508 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf25e150-e63e-4987-89ce-6e1419e00e88" containerName="kube-state-metrics" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.136612 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf25e150-e63e-4987-89ce-6e1419e00e88" containerName="kube-state-metrics" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.136905 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf25e150-e63e-4987-89ce-6e1419e00e88" containerName="kube-state-metrics" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.137737 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.139853 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.140189 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.147725 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.304653 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.304775 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.304812 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fzfj\" (UniqueName: \"kubernetes.io/projected/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-api-access-6fzfj\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.304897 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.368166 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.406198 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.406275 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.406302 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fzfj\" (UniqueName: \"kubernetes.io/projected/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-api-access-6fzfj\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc 
kubenswrapper[5045]: I1125 23:20:10.406359 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.410392 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf25e150-e63e-4987-89ce-6e1419e00e88" path="/var/lib/kubelet/pods/cf25e150-e63e-4987-89ce-6e1419e00e88/volumes" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.413469 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.413897 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.413975 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.429978 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fzfj\" (UniqueName: \"kubernetes.io/projected/9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2-kube-api-access-6fzfj\") pod \"kube-state-metrics-0\" (UID: \"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2\") " pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.469358 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 23:20:10 crc kubenswrapper[5045]: I1125 23:20:10.981656 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 23:20:11 crc kubenswrapper[5045]: I1125 23:20:11.090280 5045 generic.go:334] "Generic (PLEG): container finished" podID="5d09e5d1-9221-4785-abe2-8763781bc338" containerID="84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a" exitCode=0 Nov 25 23:20:11 crc kubenswrapper[5045]: I1125 23:20:11.090343 5045 generic.go:334] "Generic (PLEG): container finished" podID="5d09e5d1-9221-4785-abe2-8763781bc338" containerID="34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7" exitCode=0 Nov 25 23:20:11 crc kubenswrapper[5045]: I1125 23:20:11.090428 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerDied","Data":"84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a"} Nov 25 23:20:11 crc kubenswrapper[5045]: I1125 23:20:11.090479 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerDied","Data":"34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7"} Nov 25 23:20:11 crc kubenswrapper[5045]: I1125 23:20:11.095421 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2","Type":"ContainerStarted","Data":"25146cd71a6295ac92ca4285961dde90a5ba81bf0d760100ba47bdec73ab1f8a"} Nov 25 23:20:12 crc kubenswrapper[5045]: I1125 23:20:12.106626 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2","Type":"ContainerStarted","Data":"d6c6e6b156f453a1d2a14b69f7ac3a551d770998db70169e924502296de784b9"} Nov 25 23:20:12 crc kubenswrapper[5045]: I1125 23:20:12.107182 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 23:20:12 crc kubenswrapper[5045]: I1125 23:20:12.134541 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.779005894 podStartE2EDuration="2.134514464s" podCreationTimestamp="2025-11-25 23:20:10 +0000 UTC" firstStartedPulling="2025-11-25 23:20:10.98628546 +0000 UTC m=+1267.343944572" lastFinishedPulling="2025-11-25 23:20:11.34179402 +0000 UTC m=+1267.699453142" observedRunningTime="2025-11-25 23:20:12.126570673 +0000 UTC m=+1268.484229825" watchObservedRunningTime="2025-11-25 23:20:12.134514464 +0000 UTC m=+1268.492173616" Nov 25 23:20:13 crc kubenswrapper[5045]: I1125 23:20:13.357240 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 23:20:13 crc kubenswrapper[5045]: I1125 23:20:13.357602 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 23:20:13 crc kubenswrapper[5045]: I1125 23:20:13.404451 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.440316 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.175:8774/\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.440827 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.175:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.610172 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.698012 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-run-httpd\") pod \"5d09e5d1-9221-4785-abe2-8763781bc338\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.698146 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-scripts\") pod \"5d09e5d1-9221-4785-abe2-8763781bc338\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.698219 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-config-data\") pod \"5d09e5d1-9221-4785-abe2-8763781bc338\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.698279 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-sg-core-conf-yaml\") pod \"5d09e5d1-9221-4785-abe2-8763781bc338\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.698348 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-combined-ca-bundle\") pod \"5d09e5d1-9221-4785-abe2-8763781bc338\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.698445 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-log-httpd\") pod \"5d09e5d1-9221-4785-abe2-8763781bc338\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.698532 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqlzp\" (UniqueName: \"kubernetes.io/projected/5d09e5d1-9221-4785-abe2-8763781bc338-kube-api-access-rqlzp\") pod \"5d09e5d1-9221-4785-abe2-8763781bc338\" (UID: \"5d09e5d1-9221-4785-abe2-8763781bc338\") " Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.701205 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5d09e5d1-9221-4785-abe2-8763781bc338" (UID: "5d09e5d1-9221-4785-abe2-8763781bc338"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.701447 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5d09e5d1-9221-4785-abe2-8763781bc338" (UID: "5d09e5d1-9221-4785-abe2-8763781bc338"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.706009 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d09e5d1-9221-4785-abe2-8763781bc338-kube-api-access-rqlzp" (OuterVolumeSpecName: "kube-api-access-rqlzp") pod "5d09e5d1-9221-4785-abe2-8763781bc338" (UID: "5d09e5d1-9221-4785-abe2-8763781bc338"). InnerVolumeSpecName "kube-api-access-rqlzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.706459 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-scripts" (OuterVolumeSpecName: "scripts") pod "5d09e5d1-9221-4785-abe2-8763781bc338" (UID: "5d09e5d1-9221-4785-abe2-8763781bc338"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.745891 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5d09e5d1-9221-4785-abe2-8763781bc338" (UID: "5d09e5d1-9221-4785-abe2-8763781bc338"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.783438 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d09e5d1-9221-4785-abe2-8763781bc338" (UID: "5d09e5d1-9221-4785-abe2-8763781bc338"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.800984 5045 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.801027 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqlzp\" (UniqueName: \"kubernetes.io/projected/5d09e5d1-9221-4785-abe2-8763781bc338-kube-api-access-rqlzp\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.801050 5045 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5d09e5d1-9221-4785-abe2-8763781bc338-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.801068 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.801084 5045 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.801101 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.821354 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-config-data" (OuterVolumeSpecName: "config-data") pod "5d09e5d1-9221-4785-abe2-8763781bc338" (UID: "5d09e5d1-9221-4785-abe2-8763781bc338"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:14 crc kubenswrapper[5045]: I1125 23:20:14.903544 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d09e5d1-9221-4785-abe2-8763781bc338-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.142303 5045 generic.go:334] "Generic (PLEG): container finished" podID="5d09e5d1-9221-4785-abe2-8763781bc338" containerID="1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1" exitCode=0 Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.142339 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerDied","Data":"1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1"} Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.142363 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5d09e5d1-9221-4785-abe2-8763781bc338","Type":"ContainerDied","Data":"cf4df5b13e6bd370890d25f923da887027e546267544d49bdec7296d6e13bcd5"} Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.142369 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.142380 5045 scope.go:117] "RemoveContainer" containerID="84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.178342 5045 scope.go:117] "RemoveContainer" containerID="761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.197849 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.201004 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.221666 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:20:15 crc kubenswrapper[5045]: E1125 23:20:15.222115 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="ceilometer-central-agent" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.222132 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="ceilometer-central-agent" Nov 25 23:20:15 crc kubenswrapper[5045]: E1125 23:20:15.222153 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="ceilometer-notification-agent" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.222161 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="ceilometer-notification-agent" Nov 25 23:20:15 crc kubenswrapper[5045]: E1125 23:20:15.222179 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="sg-core" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.222188 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="sg-core" Nov 25 23:20:15 crc kubenswrapper[5045]: E1125 23:20:15.222200 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="proxy-httpd" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.222207 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="proxy-httpd" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.222421 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="proxy-httpd" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.233000 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="sg-core" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.233029 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="ceilometer-notification-agent" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.233040 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" containerName="ceilometer-central-agent" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.222599 5045 scope.go:117] "RemoveContainer" containerID="1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.234868 5045 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.234959 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.236726 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.237063 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.239081 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.297041 5045 scope.go:117] "RemoveContainer" containerID="34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.310980 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.311054 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-log-httpd\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.311103 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.311139 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-run-httpd\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.311188 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krqfk\" (UniqueName: \"kubernetes.io/projected/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-kube-api-access-krqfk\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.311212 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-config-data\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.311227 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 
23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.311245 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-scripts\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.315784 5045 scope.go:117] "RemoveContainer" containerID="84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a" Nov 25 23:20:15 crc kubenswrapper[5045]: E1125 23:20:15.316189 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a\": container with ID starting with 84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a not found: ID does not exist" containerID="84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.316216 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a"} err="failed to get container status \"84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a\": rpc error: code = NotFound desc = could not find container \"84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a\": container with ID starting with 84c45197d1e435cf0d6adcc58f6a9a6daabb6262355b5d05181a02173203716a not found: ID does not exist" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.316238 5045 scope.go:117] "RemoveContainer" containerID="761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5" Nov 25 23:20:15 crc kubenswrapper[5045]: E1125 23:20:15.316541 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5\": container with ID starting with 761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5 not found: ID does not exist" containerID="761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.316563 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5"} err="failed to get container status \"761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5\": rpc error: code = NotFound desc = could not find container \"761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5\": container with ID starting with 761f970d4b21945f7c6cfca9f3b520aff4bf72adf5b95fce325ce614f5d1b5c5 not found: ID does not exist" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.316579 5045 scope.go:117] "RemoveContainer" containerID="1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1" Nov 25 23:20:15 crc kubenswrapper[5045]: E1125 23:20:15.317066 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1\": container with ID starting with 1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1 not found: ID does not exist" containerID="1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.317091 5045 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1"} err="failed to get container status \"1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1\": rpc error: code = NotFound desc = could not find container \"1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1\": container with ID starting with 1b1787917344e8a2e0756c031616e6565906d3a31e1f75f0bc4cc2eb0b981ae1 not found: ID does not exist" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.317109 5045 scope.go:117] "RemoveContainer" containerID="34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7" Nov 25 23:20:15 crc kubenswrapper[5045]: E1125 23:20:15.317397 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7\": container with ID starting with 34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7 not found: ID does not exist" containerID="34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.317426 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7"} err="failed to get container status \"34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7\": rpc error: code = NotFound desc = could not find container \"34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7\": container with ID starting with 34b339cadc972286c598dda1220fde24697d8e7e3a84a277cc7cba888b24dfc7 not found: ID does not exist" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.367399 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.394771 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.412365 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-scripts\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.412480 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.412548 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-log-httpd\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.412593 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc 
kubenswrapper[5045]: I1125 23:20:15.412625 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-run-httpd\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.412678 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krqfk\" (UniqueName: \"kubernetes.io/projected/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-kube-api-access-krqfk\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.412727 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-config-data\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.412749 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.413856 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-log-httpd\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.415088 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-run-httpd\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.418121 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.418499 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-config-data\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.419081 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-scripts\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.419245 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.419620 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.440496 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krqfk\" (UniqueName: \"kubernetes.io/projected/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-kube-api-access-krqfk\") pod \"ceilometer-0\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") " pod="openstack/ceilometer-0" Nov 25 23:20:15 crc kubenswrapper[5045]: I1125 23:20:15.600706 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:20:16 crc kubenswrapper[5045]: I1125 23:20:16.071976 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:20:16 crc kubenswrapper[5045]: I1125 23:20:16.161219 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerStarted","Data":"ea7c839eae0ff72acaf09dcca4d0d47f23615dc901254ed06ac203780445b32c"} Nov 25 23:20:16 crc kubenswrapper[5045]: I1125 23:20:16.202623 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 23:20:16 crc kubenswrapper[5045]: I1125 23:20:16.407079 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d09e5d1-9221-4785-abe2-8763781bc338" path="/var/lib/kubelet/pods/5d09e5d1-9221-4785-abe2-8763781bc338/volumes" Nov 25 23:20:17 crc kubenswrapper[5045]: I1125 23:20:17.178497 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerStarted","Data":"f09808e06c0c37b5520e14909e9f169bd754e2d69d2c5acc552b3e179e8ea456"} Nov 25 23:20:18 crc kubenswrapper[5045]: I1125 23:20:18.202454 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerStarted","Data":"624f643a8e660c75575ca4ec827c4878b81780c4c82935753f8f2e317e9db7ce"} Nov 25 23:20:19 crc kubenswrapper[5045]: I1125 23:20:19.216874 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerStarted","Data":"e7a91248543a5919d8a8362a948dfff0e0ca83ab000dc7a859d0e88531b4bc8d"} Nov 25 23:20:20 crc kubenswrapper[5045]: I1125 23:20:20.229546 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerStarted","Data":"4423fbc9e6e969da9fbb1ca9b348e3825de1bd9a6a9ec8e1aad7f4c437da55c6"} Nov 25 23:20:20 crc kubenswrapper[5045]: I1125 23:20:20.229923 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 23:20:20 crc kubenswrapper[5045]: I1125 23:20:20.267171 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.81208619 podStartE2EDuration="5.26713955s" podCreationTimestamp="2025-11-25 23:20:15 +0000 UTC" firstStartedPulling="2025-11-25 23:20:16.10045665 +0000 UTC m=+1272.458115772" lastFinishedPulling="2025-11-25 23:20:19.55550999 +0000 UTC m=+1275.913169132" observedRunningTime="2025-11-25 23:20:20.259210909 +0000 UTC 
m=+1276.616870031" watchObservedRunningTime="2025-11-25 23:20:20.26713955 +0000 UTC m=+1276.624798692" Nov 25 23:20:20 crc kubenswrapper[5045]: I1125 23:20:20.485840 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 23:20:23 crc kubenswrapper[5045]: I1125 23:20:23.364831 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 23:20:23 crc kubenswrapper[5045]: I1125 23:20:23.365580 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 23:20:23 crc kubenswrapper[5045]: I1125 23:20:23.370694 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 23:20:23 crc kubenswrapper[5045]: I1125 23:20:23.373557 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.277396 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.282469 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.478154 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-l48zw"] Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.479989 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.506895 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-l48zw"] Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.645246 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.645296 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-config\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.645399 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-sb\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.645426 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.645450 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xxlw\" 
(UniqueName: \"kubernetes.io/projected/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-kube-api-access-9xxlw\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.747409 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-sb\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.747952 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.747974 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xxlw\" (UniqueName: \"kubernetes.io/projected/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-kube-api-access-9xxlw\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.748071 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.748101 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-config\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.749305 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-config\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.749422 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.749733 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.750084 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-sb\") pod 
\"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.779673 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xxlw\" (UniqueName: \"kubernetes.io/projected/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-kube-api-access-9xxlw\") pod \"dnsmasq-dns-68d4b6d797-l48zw\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:24 crc kubenswrapper[5045]: I1125 23:20:24.804927 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:25 crc kubenswrapper[5045]: I1125 23:20:25.282293 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-l48zw"] Nov 25 23:20:26 crc kubenswrapper[5045]: I1125 23:20:26.300643 5045 generic.go:334] "Generic (PLEG): container finished" podID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" containerID="d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879" exitCode=0 Nov 25 23:20:26 crc kubenswrapper[5045]: I1125 23:20:26.302492 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" event={"ID":"4d447d31-1ecf-4836-b96f-ebcde66a1cbd","Type":"ContainerDied","Data":"d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879"} Nov 25 23:20:26 crc kubenswrapper[5045]: I1125 23:20:26.302581 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" event={"ID":"4d447d31-1ecf-4836-b96f-ebcde66a1cbd","Type":"ContainerStarted","Data":"ca6cfe57fa22665c92d6c752f7c458885341b5b61a12595266cae83ccb14e42c"} Nov 25 23:20:26 crc kubenswrapper[5045]: I1125 23:20:26.783912 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:20:26 crc kubenswrapper[5045]: I1125 23:20:26.784230 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="ceilometer-central-agent" containerID="cri-o://f09808e06c0c37b5520e14909e9f169bd754e2d69d2c5acc552b3e179e8ea456" gracePeriod=30 Nov 25 23:20:26 crc kubenswrapper[5045]: I1125 23:20:26.784330 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="proxy-httpd" containerID="cri-o://4423fbc9e6e969da9fbb1ca9b348e3825de1bd9a6a9ec8e1aad7f4c437da55c6" gracePeriod=30 Nov 25 23:20:26 crc kubenswrapper[5045]: I1125 23:20:26.784372 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="sg-core" containerID="cri-o://e7a91248543a5919d8a8362a948dfff0e0ca83ab000dc7a859d0e88531b4bc8d" gracePeriod=30 Nov 25 23:20:26 crc kubenswrapper[5045]: I1125 23:20:26.784419 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="ceilometer-notification-agent" containerID="cri-o://624f643a8e660c75575ca4ec827c4878b81780c4c82935753f8f2e317e9db7ce" gracePeriod=30 Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.102225 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.320314 5045 generic.go:334] "Generic (PLEG): 
container finished" podID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerID="4423fbc9e6e969da9fbb1ca9b348e3825de1bd9a6a9ec8e1aad7f4c437da55c6" exitCode=0 Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.320353 5045 generic.go:334] "Generic (PLEG): container finished" podID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerID="e7a91248543a5919d8a8362a948dfff0e0ca83ab000dc7a859d0e88531b4bc8d" exitCode=2 Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.320362 5045 generic.go:334] "Generic (PLEG): container finished" podID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerID="f09808e06c0c37b5520e14909e9f169bd754e2d69d2c5acc552b3e179e8ea456" exitCode=0 Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.320436 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerDied","Data":"4423fbc9e6e969da9fbb1ca9b348e3825de1bd9a6a9ec8e1aad7f4c437da55c6"} Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.320466 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerDied","Data":"e7a91248543a5919d8a8362a948dfff0e0ca83ab000dc7a859d0e88531b4bc8d"} Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.320480 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerDied","Data":"f09808e06c0c37b5520e14909e9f169bd754e2d69d2c5acc552b3e179e8ea456"} Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.322547 5045 generic.go:334] "Generic (PLEG): container finished" podID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerID="4a3fd3c1df741d1243f675d495a6796d9edf95568fb448bc5db7b90cf70cce12" exitCode=137 Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.322587 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6f7dc6cc-20be-413d-9e0a-e5a14619f323","Type":"ContainerDied","Data":"4a3fd3c1df741d1243f675d495a6796d9edf95568fb448bc5db7b90cf70cce12"} Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.322603 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6f7dc6cc-20be-413d-9e0a-e5a14619f323","Type":"ContainerDied","Data":"8247596d446c5f462c52f02de3cb98de409d54e3fb5a35286731ff810c1e6f61"} Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.322613 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8247596d446c5f462c52f02de3cb98de409d54e3fb5a35286731ff810c1e6f61" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.323598 5045 generic.go:334] "Generic (PLEG): container finished" podID="285f19f1-962e-4a44-bf63-50137f6aa140" containerID="21a5b2c079233b35a3de8f72476ff5e347adb7408b76fc842cef38ee8142eef9" exitCode=137 Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.323634 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"285f19f1-962e-4a44-bf63-50137f6aa140","Type":"ContainerDied","Data":"21a5b2c079233b35a3de8f72476ff5e347adb7408b76fc842cef38ee8142eef9"} Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.323649 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"285f19f1-962e-4a44-bf63-50137f6aa140","Type":"ContainerDied","Data":"5331934a7b92b17d89e69415def42b8c5c628c36fcd5fb8a21e5c82626a7b02a"} Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 
23:20:27.323658 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5331934a7b92b17d89e69415def42b8c5c628c36fcd5fb8a21e5c82626a7b02a" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.328932 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" event={"ID":"4d447d31-1ecf-4836-b96f-ebcde66a1cbd","Type":"ContainerStarted","Data":"f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3"} Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.329026 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-log" containerID="cri-o://e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780" gracePeriod=30 Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.329260 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-api" containerID="cri-o://c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3" gracePeriod=30 Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.347137 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" podStartSLOduration=3.347121006 podStartE2EDuration="3.347121006s" podCreationTimestamp="2025-11-25 23:20:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:27.344427504 +0000 UTC m=+1283.702086616" watchObservedRunningTime="2025-11-25 23:20:27.347121006 +0000 UTC m=+1283.704780118" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.353610 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.354856 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.511059 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-combined-ca-bundle\") pod \"285f19f1-962e-4a44-bf63-50137f6aa140\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.511106 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-config-data\") pod \"285f19f1-962e-4a44-bf63-50137f6aa140\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.511132 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f7dc6cc-20be-413d-9e0a-e5a14619f323-logs\") pod \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.511215 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfj7k\" (UniqueName: \"kubernetes.io/projected/6f7dc6cc-20be-413d-9e0a-e5a14619f323-kube-api-access-hfj7k\") pod \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.511245 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-combined-ca-bundle\") pod \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.511293 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-config-data\") pod \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\" (UID: \"6f7dc6cc-20be-413d-9e0a-e5a14619f323\") " Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.511307 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4m2r\" (UniqueName: \"kubernetes.io/projected/285f19f1-962e-4a44-bf63-50137f6aa140-kube-api-access-q4m2r\") pod \"285f19f1-962e-4a44-bf63-50137f6aa140\" (UID: \"285f19f1-962e-4a44-bf63-50137f6aa140\") " Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.522760 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f7dc6cc-20be-413d-9e0a-e5a14619f323-logs" (OuterVolumeSpecName: "logs") pod "6f7dc6cc-20be-413d-9e0a-e5a14619f323" (UID: "6f7dc6cc-20be-413d-9e0a-e5a14619f323"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.526635 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f7dc6cc-20be-413d-9e0a-e5a14619f323-kube-api-access-hfj7k" (OuterVolumeSpecName: "kube-api-access-hfj7k") pod "6f7dc6cc-20be-413d-9e0a-e5a14619f323" (UID: "6f7dc6cc-20be-413d-9e0a-e5a14619f323"). InnerVolumeSpecName "kube-api-access-hfj7k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.536819 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/285f19f1-962e-4a44-bf63-50137f6aa140-kube-api-access-q4m2r" (OuterVolumeSpecName: "kube-api-access-q4m2r") pod "285f19f1-962e-4a44-bf63-50137f6aa140" (UID: "285f19f1-962e-4a44-bf63-50137f6aa140"). InnerVolumeSpecName "kube-api-access-q4m2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.551533 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "285f19f1-962e-4a44-bf63-50137f6aa140" (UID: "285f19f1-962e-4a44-bf63-50137f6aa140"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.556691 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-config-data" (OuterVolumeSpecName: "config-data") pod "285f19f1-962e-4a44-bf63-50137f6aa140" (UID: "285f19f1-962e-4a44-bf63-50137f6aa140"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.572078 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f7dc6cc-20be-413d-9e0a-e5a14619f323" (UID: "6f7dc6cc-20be-413d-9e0a-e5a14619f323"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.580163 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-config-data" (OuterVolumeSpecName: "config-data") pod "6f7dc6cc-20be-413d-9e0a-e5a14619f323" (UID: "6f7dc6cc-20be-413d-9e0a-e5a14619f323"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.619034 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.619271 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/285f19f1-962e-4a44-bf63-50137f6aa140-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.619345 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f7dc6cc-20be-413d-9e0a-e5a14619f323-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.619412 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfj7k\" (UniqueName: \"kubernetes.io/projected/6f7dc6cc-20be-413d-9e0a-e5a14619f323-kube-api-access-hfj7k\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.619482 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.619555 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f7dc6cc-20be-413d-9e0a-e5a14619f323-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:27 crc kubenswrapper[5045]: I1125 23:20:27.619627 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4m2r\" (UniqueName: \"kubernetes.io/projected/285f19f1-962e-4a44-bf63-50137f6aa140-kube-api-access-q4m2r\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.338518 5045 generic.go:334] "Generic (PLEG): container finished" podID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerID="e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780" exitCode=143 Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.339199 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.338613 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9163334-4411-4a03-9d40-8a86305dc8ee","Type":"ContainerDied","Data":"e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780"} Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.339842 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.339950 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.378117 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.389144 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.407738 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="285f19f1-962e-4a44-bf63-50137f6aa140" path="/var/lib/kubelet/pods/285f19f1-962e-4a44-bf63-50137f6aa140/volumes" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.408226 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.409904 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 23:20:28 crc kubenswrapper[5045]: E1125 23:20:28.410169 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="285f19f1-962e-4a44-bf63-50137f6aa140" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.410183 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="285f19f1-962e-4a44-bf63-50137f6aa140" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 23:20:28 crc kubenswrapper[5045]: E1125 23:20:28.410209 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerName="nova-metadata-metadata" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.410217 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerName="nova-metadata-metadata" Nov 25 23:20:28 crc kubenswrapper[5045]: E1125 23:20:28.410235 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerName="nova-metadata-log" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.410241 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerName="nova-metadata-log" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.410382 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerName="nova-metadata-log" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.410400 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" containerName="nova-metadata-metadata" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.410410 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="285f19f1-962e-4a44-bf63-50137f6aa140" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.410947 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.416353 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.416572 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.416728 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.439186 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.457312 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.466913 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.468522 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.473056 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.473265 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.475827 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.533429 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.533484 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.533537 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.533644 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwr4g\" (UniqueName: \"kubernetes.io/projected/4a276597-ed77-4cd6-95b0-57e64be23060-kube-api-access-fwr4g\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.533735 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.634840 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635102 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1aa735ee-28a3-4e0e-8f53-f2baea365124-logs\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635221 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635312 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635382 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635479 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635573 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635675 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xg8c\" (UniqueName: \"kubernetes.io/projected/1aa735ee-28a3-4e0e-8f53-f2baea365124-kube-api-access-8xg8c\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635772 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwr4g\" (UniqueName: \"kubernetes.io/projected/4a276597-ed77-4cd6-95b0-57e64be23060-kube-api-access-fwr4g\") 
pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.635869 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-config-data\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.639455 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.645278 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.652093 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.653127 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a276597-ed77-4cd6-95b0-57e64be23060-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.653600 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwr4g\" (UniqueName: \"kubernetes.io/projected/4a276597-ed77-4cd6-95b0-57e64be23060-kube-api-access-fwr4g\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a276597-ed77-4cd6-95b0-57e64be23060\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.732365 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.736798 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.736987 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.737092 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xg8c\" (UniqueName: \"kubernetes.io/projected/1aa735ee-28a3-4e0e-8f53-f2baea365124-kube-api-access-8xg8c\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.737193 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-config-data\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.737320 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1aa735ee-28a3-4e0e-8f53-f2baea365124-logs\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.737804 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1aa735ee-28a3-4e0e-8f53-f2baea365124-logs\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.742185 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.742644 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-config-data\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.744467 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.768671 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xg8c\" (UniqueName: 
\"kubernetes.io/projected/1aa735ee-28a3-4e0e-8f53-f2baea365124-kube-api-access-8xg8c\") pod \"nova-metadata-0\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " pod="openstack/nova-metadata-0" Nov 25 23:20:28 crc kubenswrapper[5045]: I1125 23:20:28.786361 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:20:29 crc kubenswrapper[5045]: E1125 23:20:29.170080 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f7dc6cc_20be_413d_9e0a_e5a14619f323.slice/crio-4a3fd3c1df741d1243f675d495a6796d9edf95568fb448bc5db7b90cf70cce12.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod285f19f1_962e_4a44_bf63_50137f6aa140.slice/crio-21a5b2c079233b35a3de8f72476ff5e347adb7408b76fc842cef38ee8142eef9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod285f19f1_962e_4a44_bf63_50137f6aa140.slice/crio-5331934a7b92b17d89e69415def42b8c5c628c36fcd5fb8a21e5c82626a7b02a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod285f19f1_962e_4a44_bf63_50137f6aa140.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ed9c3c1_ea80_44cd_9149_8beaa7a58fc7.slice/crio-624f643a8e660c75575ca4ec827c4878b81780c4c82935753f8f2e317e9db7ce.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9163334_4411_4a03_9d40_8a86305dc8ee.slice/crio-conmon-e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9163334_4411_4a03_9d40_8a86305dc8ee.slice/crio-e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f7dc6cc_20be_413d_9e0a_e5a14619f323.slice/crio-conmon-4a3fd3c1df741d1243f675d495a6796d9edf95568fb448bc5db7b90cf70cce12.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f7dc6cc_20be_413d_9e0a_e5a14619f323.slice/crio-8247596d446c5f462c52f02de3cb98de409d54e3fb5a35286731ff810c1e6f61\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice/crio-889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ed9c3c1_ea80_44cd_9149_8beaa7a58fc7.slice/crio-conmon-4423fbc9e6e969da9fbb1ca9b348e3825de1bd9a6a9ec8e1aad7f4c437da55c6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ed9c3c1_ea80_44cd_9149_8beaa7a58fc7.slice/crio-conmon-624f643a8e660c75575ca4ec827c4878b81780c4c82935753f8f2e317e9db7ce.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod285f19f1_962e_4a44_bf63_50137f6aa140.slice/crio-conmon-21a5b2c079233b35a3de8f72476ff5e347adb7408b76fc842cef38ee8142eef9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ed9c3c1_ea80_44cd_9149_8beaa7a58fc7.slice/crio-4423fbc9e6e969da9fbb1ca9b348e3825de1bd9a6a9ec8e1aad7f4c437da55c6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f7dc6cc_20be_413d_9e0a_e5a14619f323.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.237504 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.339502 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:29 crc kubenswrapper[5045]: W1125 23:20:29.348746 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1aa735ee_28a3_4e0e_8f53_f2baea365124.slice/crio-779c640e5721f199c5c25b3798d9db0cc2817a4dc03db1a487d2ba7a0ff86b94 WatchSource:0}: Error finding container 779c640e5721f199c5c25b3798d9db0cc2817a4dc03db1a487d2ba7a0ff86b94: Status 404 returned error can't find the container with id 779c640e5721f199c5c25b3798d9db0cc2817a4dc03db1a487d2ba7a0ff86b94 Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.349856 5045 generic.go:334] "Generic (PLEG): container finished" podID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerID="624f643a8e660c75575ca4ec827c4878b81780c4c82935753f8f2e317e9db7ce" exitCode=0 Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.349918 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerDied","Data":"624f643a8e660c75575ca4ec827c4878b81780c4c82935753f8f2e317e9db7ce"} Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.351003 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"4a276597-ed77-4cd6-95b0-57e64be23060","Type":"ContainerStarted","Data":"e5f7da591820179ace09bd33a7142ddf9ea021c0634c9c1d9c24d68f1b624ef0"} Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.435621 5045 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.435621 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.553393 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-run-httpd\") pod \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") "
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.553877 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-combined-ca-bundle\") pod \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") "
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.553914 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-config-data\") pod \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") "
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.553940 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krqfk\" (UniqueName: \"kubernetes.io/projected/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-kube-api-access-krqfk\") pod \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") "
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.553998 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-ceilometer-tls-certs\") pod \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") "
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.554056 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-log-httpd\") pod \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") "
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.554125 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-sg-core-conf-yaml\") pod \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") "
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.554152 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-scripts\") pod \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\" (UID: \"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7\") "
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.556902 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" (UID: "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.559239 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" (UID: "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.562233 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-kube-api-access-krqfk" (OuterVolumeSpecName: "kube-api-access-krqfk") pod "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" (UID: "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7"). InnerVolumeSpecName "kube-api-access-krqfk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.563906 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-scripts" (OuterVolumeSpecName: "scripts") pod "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" (UID: "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.593870 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" (UID: "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.626823 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" (UID: "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.656248 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.656289 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krqfk\" (UniqueName: \"kubernetes.io/projected/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-kube-api-access-krqfk\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.656304 5045 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.656316 5045 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.656329 5045 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.656340 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.656352 5045 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.667917 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-config-data" (OuterVolumeSpecName: "config-data") pod "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" (UID: "6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:29 crc kubenswrapper[5045]: I1125 23:20:29.758446 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.365764 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1aa735ee-28a3-4e0e-8f53-f2baea365124","Type":"ContainerStarted","Data":"38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa"} Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.366188 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1aa735ee-28a3-4e0e-8f53-f2baea365124","Type":"ContainerStarted","Data":"6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded"} Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.366203 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1aa735ee-28a3-4e0e-8f53-f2baea365124","Type":"ContainerStarted","Data":"779c640e5721f199c5c25b3798d9db0cc2817a4dc03db1a487d2ba7a0ff86b94"} Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.369206 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"4a276597-ed77-4cd6-95b0-57e64be23060","Type":"ContainerStarted","Data":"00c076df3f26bd01e2f9229877c796db450b15d9c74a913b1b63541830f6653a"} Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.371673 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7","Type":"ContainerDied","Data":"ea7c839eae0ff72acaf09dcca4d0d47f23615dc901254ed06ac203780445b32c"} Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.371739 5045 scope.go:117] "RemoveContainer" containerID="4423fbc9e6e969da9fbb1ca9b348e3825de1bd9a6a9ec8e1aad7f4c437da55c6" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.371819 5045 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.371819 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.398615 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.398596525 podStartE2EDuration="2.398596525s" podCreationTimestamp="2025-11-25 23:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:30.392244581 +0000 UTC m=+1286.749903733" watchObservedRunningTime="2025-11-25 23:20:30.398596525 +0000 UTC m=+1286.756255637"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.401196 5045 scope.go:117] "RemoveContainer" containerID="e7a91248543a5919d8a8362a948dfff0e0ca83ab000dc7a859d0e88531b4bc8d"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.414847 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f7dc6cc-20be-413d-9e0a-e5a14619f323" path="/var/lib/kubelet/pods/6f7dc6cc-20be-413d-9e0a-e5a14619f323/volumes"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.423919 5045 scope.go:117] "RemoveContainer" containerID="624f643a8e660c75575ca4ec827c4878b81780c4c82935753f8f2e317e9db7ce"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.445202 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.445175641 podStartE2EDuration="2.445175641s" podCreationTimestamp="2025-11-25 23:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:30.428100892 +0000 UTC m=+1286.785760014" watchObservedRunningTime="2025-11-25 23:20:30.445175641 +0000 UTC m=+1286.802834773"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.460805 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.471339 5045 scope.go:117] "RemoveContainer" containerID="f09808e06c0c37b5520e14909e9f169bd754e2d69d2c5acc552b3e179e8ea456"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.474014 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.487730 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 23:20:30 crc kubenswrapper[5045]: E1125 23:20:30.488149 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="proxy-httpd"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.488172 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="proxy-httpd"
Nov 25 23:20:30 crc kubenswrapper[5045]: E1125 23:20:30.488191 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="ceilometer-notification-agent"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.488201 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="ceilometer-notification-agent"
Nov 25 23:20:30 crc kubenswrapper[5045]: E1125 23:20:30.488216 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="ceilometer-central-agent"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.488224 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="ceilometer-central-agent"
Nov 25 23:20:30 crc kubenswrapper[5045]: E1125 23:20:30.488240 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="sg-core"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.488247 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="sg-core"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.488519 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="ceilometer-notification-agent"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.488553 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="proxy-httpd"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.488577 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="ceilometer-central-agent"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.488594 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" containerName="sg-core"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.490944 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.497103 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.498115 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.498137 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.505677 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.675592 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.675648 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0"
Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.675735 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-run-httpd\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0"
\"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.676501 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.676613 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-config-data\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.676652 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-scripts\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.676709 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-log-httpd\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.778696 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-run-httpd\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.778868 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnvf7\" (UniqueName: \"kubernetes.io/projected/77e5f87c-d414-4533-89b6-2455d46a6bd2-kube-api-access-hnvf7\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.779021 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.779081 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-config-data\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.779114 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-scripts\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.779168 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-log-httpd\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.779253 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.779283 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.780631 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-run-httpd\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.791030 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-log-httpd\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.793038 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-scripts\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.794088 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.794216 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.795309 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.796104 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-config-data\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.806497 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnvf7\" (UniqueName: 
\"kubernetes.io/projected/77e5f87c-d414-4533-89b6-2455d46a6bd2-kube-api-access-hnvf7\") pod \"ceilometer-0\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.808812 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:20:30 crc kubenswrapper[5045]: I1125 23:20:30.928769 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.084769 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42q8r\" (UniqueName: \"kubernetes.io/projected/a9163334-4411-4a03-9d40-8a86305dc8ee-kube-api-access-42q8r\") pod \"a9163334-4411-4a03-9d40-8a86305dc8ee\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.084801 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-config-data\") pod \"a9163334-4411-4a03-9d40-8a86305dc8ee\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.084830 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9163334-4411-4a03-9d40-8a86305dc8ee-logs\") pod \"a9163334-4411-4a03-9d40-8a86305dc8ee\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.084922 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-combined-ca-bundle\") pod \"a9163334-4411-4a03-9d40-8a86305dc8ee\" (UID: \"a9163334-4411-4a03-9d40-8a86305dc8ee\") " Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.087121 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9163334-4411-4a03-9d40-8a86305dc8ee-logs" (OuterVolumeSpecName: "logs") pod "a9163334-4411-4a03-9d40-8a86305dc8ee" (UID: "a9163334-4411-4a03-9d40-8a86305dc8ee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.090933 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9163334-4411-4a03-9d40-8a86305dc8ee-kube-api-access-42q8r" (OuterVolumeSpecName: "kube-api-access-42q8r") pod "a9163334-4411-4a03-9d40-8a86305dc8ee" (UID: "a9163334-4411-4a03-9d40-8a86305dc8ee"). InnerVolumeSpecName "kube-api-access-42q8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.118945 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9163334-4411-4a03-9d40-8a86305dc8ee" (UID: "a9163334-4411-4a03-9d40-8a86305dc8ee"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.133772 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-config-data" (OuterVolumeSpecName: "config-data") pod "a9163334-4411-4a03-9d40-8a86305dc8ee" (UID: "a9163334-4411-4a03-9d40-8a86305dc8ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.187081 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42q8r\" (UniqueName: \"kubernetes.io/projected/a9163334-4411-4a03-9d40-8a86305dc8ee-kube-api-access-42q8r\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.187115 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.187125 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9163334-4411-4a03-9d40-8a86305dc8ee-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.187133 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9163334-4411-4a03-9d40-8a86305dc8ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.272095 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.381781 5045 generic.go:334] "Generic (PLEG): container finished" podID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerID="c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3" exitCode=0 Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.381843 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9163334-4411-4a03-9d40-8a86305dc8ee","Type":"ContainerDied","Data":"c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3"} Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.381878 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9163334-4411-4a03-9d40-8a86305dc8ee","Type":"ContainerDied","Data":"4e30ae40e501f2e886341dd9f780bc3fff12284bbcf5e128c3136b6dd7dd0319"} Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.381893 5045 scope.go:117] "RemoveContainer" containerID="c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3" Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.382830 5045 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.382830 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.382965 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerStarted","Data":"eced0398bf8acc4f1e29baec55be4a20340c2faeb5ba20b9f040b197a7824a81"}
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.416094 5045 scope.go:117] "RemoveContainer" containerID="e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.423880 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.434070 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.442116 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:31 crc kubenswrapper[5045]: E1125 23:20:31.442458 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-api"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.442474 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-api"
Nov 25 23:20:31 crc kubenswrapper[5045]: E1125 23:20:31.442490 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-log"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.442496 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-log"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.442684 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-api"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.442704 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" containerName="nova-api-log"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.445141 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.447359 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.447606 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.448596 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.449969 5045 scope.go:117] "RemoveContainer" containerID="c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3"
Nov 25 23:20:31 crc kubenswrapper[5045]: E1125 23:20:31.450474 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3\": container with ID starting with c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3 not found: ID does not exist" containerID="c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.450505 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3"} err="failed to get container status \"c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3\": rpc error: code = NotFound desc = could not find container \"c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3\": container with ID starting with c64a1c1e0d3565d0789bffa029b2bfb511715015e4b102d8aa471580e05f62f3 not found: ID does not exist"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.450524 5045 scope.go:117] "RemoveContainer" containerID="e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780"
Nov 25 23:20:31 crc kubenswrapper[5045]: E1125 23:20:31.451092 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780\": container with ID starting with e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780 not found: ID does not exist" containerID="e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.451120 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780"} err="failed to get container status \"e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780\": rpc error: code = NotFound desc = could not find container \"e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780\": container with ID starting with e7f235c87e34c945735fef653700403e6e6dda64d89f91c29b3c653e91821780 not found: ID does not exist"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.453993 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.593647 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.593724 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-public-tls-certs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.593744 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2xvk\" (UniqueName: \"kubernetes.io/projected/3f965705-8dcd-412b-8850-1e3fba1e9a95-kube-api-access-h2xvk\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.593820 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.593842 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-config-data\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.593906 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f965705-8dcd-412b-8850-1e3fba1e9a95-logs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.695598 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-public-tls-certs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.695629 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2xvk\" (UniqueName: \"kubernetes.io/projected/3f965705-8dcd-412b-8850-1e3fba1e9a95-kube-api-access-h2xvk\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.695677 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.695698 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-config-data\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.695773 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f965705-8dcd-412b-8850-1e3fba1e9a95-logs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.695815 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.698290 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f965705-8dcd-412b-8850-1e3fba1e9a95-logs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.700185 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.701132 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-config-data\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.701432 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.704185 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-public-tls-certs\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.717175 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2xvk\" (UniqueName: \"kubernetes.io/projected/3f965705-8dcd-412b-8850-1e3fba1e9a95-kube-api-access-h2xvk\") pod \"nova-api-0\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " pod="openstack/nova-api-0"
Nov 25 23:20:31 crc kubenswrapper[5045]: I1125 23:20:31.763380 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 23:20:32 crc kubenswrapper[5045]: I1125 23:20:32.260621 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 23:20:32 crc kubenswrapper[5045]: I1125 23:20:32.434523 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7" path="/var/lib/kubelet/pods/6ed9c3c1-ea80-44cd-9149-8beaa7a58fc7/volumes"
Nov 25 23:20:32 crc kubenswrapper[5045]: I1125 23:20:32.435665 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9163334-4411-4a03-9d40-8a86305dc8ee" path="/var/lib/kubelet/pods/a9163334-4411-4a03-9d40-8a86305dc8ee/volumes"
Nov 25 23:20:32 crc kubenswrapper[5045]: I1125 23:20:32.441286 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f965705-8dcd-412b-8850-1e3fba1e9a95","Type":"ContainerStarted","Data":"364b0bffe72726a22c4466c7c4bccdbe054b97df0318e44e0b42699bf4a592b0"}
Nov 25 23:20:32 crc kubenswrapper[5045]: I1125 23:20:32.441323 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerStarted","Data":"1039a1506b9671912793cc85799339f788c85ba91d3f3702398a85865258df8a"}
Nov 25 23:20:33 crc kubenswrapper[5045]: I1125 23:20:33.414982 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerStarted","Data":"ded0de1a0b65711262769d68815082ef49a0345cee62a7dde51af1d293b99f89"}
Nov 25 23:20:33 crc kubenswrapper[5045]: I1125 23:20:33.417078 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f965705-8dcd-412b-8850-1e3fba1e9a95","Type":"ContainerStarted","Data":"e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963"}
Nov 25 23:20:33 crc kubenswrapper[5045]: I1125 23:20:33.417348 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f965705-8dcd-412b-8850-1e3fba1e9a95","Type":"ContainerStarted","Data":"6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e"}
Nov 25 23:20:33 crc kubenswrapper[5045]: I1125 23:20:33.442175 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.4421563920000002 podStartE2EDuration="2.442156392s" podCreationTimestamp="2025-11-25 23:20:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:33.441781911 +0000 UTC m=+1289.799441023" watchObservedRunningTime="2025-11-25 23:20:33.442156392 +0000 UTC m=+1289.799815494"
Nov 25 23:20:33 crc kubenswrapper[5045]: I1125 23:20:33.733140 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 23:20:33 crc kubenswrapper[5045]: I1125 23:20:33.786874 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 23:20:33 crc kubenswrapper[5045]: I1125 23:20:33.787193 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 23:20:34 crc kubenswrapper[5045]: I1125 23:20:34.442231 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerStarted","Data":"3afda70b7b37fbff4850dd32b60f45a370d53d21604b00c7803b42e31b5208c0"}
Nov 25 23:20:34 crc kubenswrapper[5045]: I1125 23:20:34.807043 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw"
Nov 25 23:20:34 crc kubenswrapper[5045]: I1125 23:20:34.891556 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-z5dhk"]
Nov 25 23:20:34 crc kubenswrapper[5045]: I1125 23:20:34.891802 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" podUID="0b4bf700-b50f-412f-bfb4-cf401cd618be" containerName="dnsmasq-dns" containerID="cri-o://e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535" gracePeriod=10
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.394839 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk"
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.450617 5045 generic.go:334] "Generic (PLEG): container finished" podID="0b4bf700-b50f-412f-bfb4-cf401cd618be" containerID="e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535" exitCode=0
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.450726 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk"
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.450745 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" event={"ID":"0b4bf700-b50f-412f-bfb4-cf401cd618be","Type":"ContainerDied","Data":"e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535"}
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.451147 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-z5dhk" event={"ID":"0b4bf700-b50f-412f-bfb4-cf401cd618be","Type":"ContainerDied","Data":"1b51d476e1c54cbf2f95260bd6d40ca3efad4f54fe1db13a26b99caf2772bfa0"}
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.451166 5045 scope.go:117] "RemoveContainer" containerID="e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535"
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.455160 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerStarted","Data":"2a74d660476a94cef3409c9cb9f3be725285ae7d10f35fec7a55a171ca48fc6b"}
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.455362 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.476741 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9235657160000001 podStartE2EDuration="5.476705929s" podCreationTimestamp="2025-11-25 23:20:30 +0000 UTC" firstStartedPulling="2025-11-25 23:20:31.279596334 +0000 UTC m=+1287.637255456" lastFinishedPulling="2025-11-25 23:20:34.832736557 +0000 UTC m=+1291.190395669" observedRunningTime="2025-11-25 23:20:35.47410647 +0000 UTC m=+1291.831765582" watchObservedRunningTime="2025-11-25 23:20:35.476705929 +0000 UTC m=+1291.834365041"
Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.483289 5045 scope.go:117] "RemoveContainer" containerID="56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139"
containerID="e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535" Nov 25 23:20:35 crc kubenswrapper[5045]: E1125 23:20:35.504122 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535\": container with ID starting with e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535 not found: ID does not exist" containerID="e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.504166 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535"} err="failed to get container status \"e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535\": rpc error: code = NotFound desc = could not find container \"e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535\": container with ID starting with e515920d996ecd8b21843715cd9e86946c21765714b297bcff33c23d8433d535 not found: ID does not exist" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.504194 5045 scope.go:117] "RemoveContainer" containerID="56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139" Nov 25 23:20:35 crc kubenswrapper[5045]: E1125 23:20:35.504597 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139\": container with ID starting with 56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139 not found: ID does not exist" containerID="56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.504632 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139"} err="failed to get container status \"56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139\": rpc error: code = NotFound desc = could not find container \"56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139\": container with ID starting with 56deb0c2ad43ed4663f1adb7880bb90d45f08880be3072f0b423563877971139 not found: ID does not exist" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.572313 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqlsb\" (UniqueName: \"kubernetes.io/projected/0b4bf700-b50f-412f-bfb4-cf401cd618be-kube-api-access-kqlsb\") pod \"0b4bf700-b50f-412f-bfb4-cf401cd618be\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.572513 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-dns-svc\") pod \"0b4bf700-b50f-412f-bfb4-cf401cd618be\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.572611 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-sb\") pod \"0b4bf700-b50f-412f-bfb4-cf401cd618be\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.572657 5045 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-nb\") pod \"0b4bf700-b50f-412f-bfb4-cf401cd618be\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.572731 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-config\") pod \"0b4bf700-b50f-412f-bfb4-cf401cd618be\" (UID: \"0b4bf700-b50f-412f-bfb4-cf401cd618be\") " Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.578546 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b4bf700-b50f-412f-bfb4-cf401cd618be-kube-api-access-kqlsb" (OuterVolumeSpecName: "kube-api-access-kqlsb") pod "0b4bf700-b50f-412f-bfb4-cf401cd618be" (UID: "0b4bf700-b50f-412f-bfb4-cf401cd618be"). InnerVolumeSpecName "kube-api-access-kqlsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.624649 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-config" (OuterVolumeSpecName: "config") pod "0b4bf700-b50f-412f-bfb4-cf401cd618be" (UID: "0b4bf700-b50f-412f-bfb4-cf401cd618be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.626187 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0b4bf700-b50f-412f-bfb4-cf401cd618be" (UID: "0b4bf700-b50f-412f-bfb4-cf401cd618be"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.632916 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0b4bf700-b50f-412f-bfb4-cf401cd618be" (UID: "0b4bf700-b50f-412f-bfb4-cf401cd618be"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.653385 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0b4bf700-b50f-412f-bfb4-cf401cd618be" (UID: "0b4bf700-b50f-412f-bfb4-cf401cd618be"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.675165 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.675209 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqlsb\" (UniqueName: \"kubernetes.io/projected/0b4bf700-b50f-412f-bfb4-cf401cd618be-kube-api-access-kqlsb\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.675228 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.675245 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.675261 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b4bf700-b50f-412f-bfb4-cf401cd618be-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.794014 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-z5dhk"] Nov 25 23:20:35 crc kubenswrapper[5045]: I1125 23:20:35.805155 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-z5dhk"] Nov 25 23:20:36 crc kubenswrapper[5045]: I1125 23:20:36.409354 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b4bf700-b50f-412f-bfb4-cf401cd618be" path="/var/lib/kubelet/pods/0b4bf700-b50f-412f-bfb4-cf401cd618be/volumes" Nov 25 23:20:38 crc kubenswrapper[5045]: I1125 23:20:38.735427 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:38 crc kubenswrapper[5045]: I1125 23:20:38.765533 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:38 crc kubenswrapper[5045]: I1125 23:20:38.789402 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 23:20:38 crc kubenswrapper[5045]: I1125 23:20:38.790582 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 23:20:39 crc kubenswrapper[5045]: E1125 23:20:39.456415 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice/crio-889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.516631 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.700124 5045 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-cell1-cell-mapping-kbnmb"] Nov 25 23:20:39 crc kubenswrapper[5045]: E1125 23:20:39.702252 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b4bf700-b50f-412f-bfb4-cf401cd618be" containerName="dnsmasq-dns" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.702285 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b4bf700-b50f-412f-bfb4-cf401cd618be" containerName="dnsmasq-dns" Nov 25 23:20:39 crc kubenswrapper[5045]: E1125 23:20:39.702313 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b4bf700-b50f-412f-bfb4-cf401cd618be" containerName="init" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.702323 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b4bf700-b50f-412f-bfb4-cf401cd618be" containerName="init" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.702826 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b4bf700-b50f-412f-bfb4-cf401cd618be" containerName="dnsmasq-dns" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.703903 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.708991 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.715344 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.717865 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-kbnmb"] Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.797869 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.181:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.797873 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.181:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.858417 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q872m\" (UniqueName: \"kubernetes.io/projected/c385bb99-2964-4b34-b514-9d2c1a01f26d-kube-api-access-q872m\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.858706 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-config-data\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.858810 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.858829 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-scripts\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.960183 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.960227 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-scripts\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.960310 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q872m\" (UniqueName: \"kubernetes.io/projected/c385bb99-2964-4b34-b514-9d2c1a01f26d-kube-api-access-q872m\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.960329 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-config-data\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.966467 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-scripts\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.967269 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-config-data\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.973520 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:39 crc kubenswrapper[5045]: I1125 23:20:39.980173 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q872m\" (UniqueName: 
\"kubernetes.io/projected/c385bb99-2964-4b34-b514-9d2c1a01f26d-kube-api-access-q872m\") pod \"nova-cell1-cell-mapping-kbnmb\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:40 crc kubenswrapper[5045]: I1125 23:20:40.026352 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:40 crc kubenswrapper[5045]: I1125 23:20:40.516580 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-kbnmb"] Nov 25 23:20:41 crc kubenswrapper[5045]: I1125 23:20:41.525560 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-kbnmb" event={"ID":"c385bb99-2964-4b34-b514-9d2c1a01f26d","Type":"ContainerStarted","Data":"a7ac03776d3512855db8907eb27b209873d6c2af32991119985120a09547f792"} Nov 25 23:20:41 crc kubenswrapper[5045]: I1125 23:20:41.525952 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-kbnmb" event={"ID":"c385bb99-2964-4b34-b514-9d2c1a01f26d","Type":"ContainerStarted","Data":"ca307889f3cff8ba65ac41c3a3b2a147b42fa4a761e17d43067c278ae74bb2c1"} Nov 25 23:20:41 crc kubenswrapper[5045]: I1125 23:20:41.540805 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-kbnmb" podStartSLOduration=2.540781031 podStartE2EDuration="2.540781031s" podCreationTimestamp="2025-11-25 23:20:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:41.540611306 +0000 UTC m=+1297.898270458" watchObservedRunningTime="2025-11-25 23:20:41.540781031 +0000 UTC m=+1297.898440143" Nov 25 23:20:41 crc kubenswrapper[5045]: I1125 23:20:41.763780 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 23:20:41 crc kubenswrapper[5045]: I1125 23:20:41.763858 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 23:20:42 crc kubenswrapper[5045]: I1125 23:20:42.779963 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.183:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 23:20:42 crc kubenswrapper[5045]: I1125 23:20:42.779978 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.183:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 23:20:45 crc kubenswrapper[5045]: I1125 23:20:45.563832 5045 generic.go:334] "Generic (PLEG): container finished" podID="c385bb99-2964-4b34-b514-9d2c1a01f26d" containerID="a7ac03776d3512855db8907eb27b209873d6c2af32991119985120a09547f792" exitCode=0 Nov 25 23:20:45 crc kubenswrapper[5045]: I1125 23:20:45.563898 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-kbnmb" event={"ID":"c385bb99-2964-4b34-b514-9d2c1a01f26d","Type":"ContainerDied","Data":"a7ac03776d3512855db8907eb27b209873d6c2af32991119985120a09547f792"} Nov 25 23:20:46 crc kubenswrapper[5045]: I1125 23:20:46.986270 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.108678 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q872m\" (UniqueName: \"kubernetes.io/projected/c385bb99-2964-4b34-b514-9d2c1a01f26d-kube-api-access-q872m\") pod \"c385bb99-2964-4b34-b514-9d2c1a01f26d\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.108742 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-config-data\") pod \"c385bb99-2964-4b34-b514-9d2c1a01f26d\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.108811 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-scripts\") pod \"c385bb99-2964-4b34-b514-9d2c1a01f26d\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.108843 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-combined-ca-bundle\") pod \"c385bb99-2964-4b34-b514-9d2c1a01f26d\" (UID: \"c385bb99-2964-4b34-b514-9d2c1a01f26d\") " Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.113692 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c385bb99-2964-4b34-b514-9d2c1a01f26d-kube-api-access-q872m" (OuterVolumeSpecName: "kube-api-access-q872m") pod "c385bb99-2964-4b34-b514-9d2c1a01f26d" (UID: "c385bb99-2964-4b34-b514-9d2c1a01f26d"). InnerVolumeSpecName "kube-api-access-q872m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.115544 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-scripts" (OuterVolumeSpecName: "scripts") pod "c385bb99-2964-4b34-b514-9d2c1a01f26d" (UID: "c385bb99-2964-4b34-b514-9d2c1a01f26d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.138128 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-config-data" (OuterVolumeSpecName: "config-data") pod "c385bb99-2964-4b34-b514-9d2c1a01f26d" (UID: "c385bb99-2964-4b34-b514-9d2c1a01f26d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.161180 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c385bb99-2964-4b34-b514-9d2c1a01f26d" (UID: "c385bb99-2964-4b34-b514-9d2c1a01f26d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.211259 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q872m\" (UniqueName: \"kubernetes.io/projected/c385bb99-2964-4b34-b514-9d2c1a01f26d-kube-api-access-q872m\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.211292 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.211304 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.211312 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c385bb99-2964-4b34-b514-9d2c1a01f26d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.586265 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-kbnmb" event={"ID":"c385bb99-2964-4b34-b514-9d2c1a01f26d","Type":"ContainerDied","Data":"ca307889f3cff8ba65ac41c3a3b2a147b42fa4a761e17d43067c278ae74bb2c1"} Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.586304 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca307889f3cff8ba65ac41c3a3b2a147b42fa4a761e17d43067c278ae74bb2c1" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.586343 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-kbnmb" Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.776745 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.777067 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-log" containerID="cri-o://6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e" gracePeriod=30 Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.777146 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-api" containerID="cri-o://e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963" gracePeriod=30 Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.792807 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.793045 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="52ab23f2-ab24-405e-8a1c-cdac875e433b" containerName="nova-scheduler-scheduler" containerID="cri-o://41f58a026e5fed932a73b18d41697680d94490d3152810196ebae7d402af2b47" gracePeriod=30 Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.802424 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.802665 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" 
containerName="nova-metadata-log" containerID="cri-o://6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded" gracePeriod=30 Nov 25 23:20:47 crc kubenswrapper[5045]: I1125 23:20:47.804187 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-metadata" containerID="cri-o://38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa" gracePeriod=30 Nov 25 23:20:48 crc kubenswrapper[5045]: I1125 23:20:48.598761 5045 generic.go:334] "Generic (PLEG): container finished" podID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerID="6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded" exitCode=143 Nov 25 23:20:48 crc kubenswrapper[5045]: I1125 23:20:48.599099 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1aa735ee-28a3-4e0e-8f53-f2baea365124","Type":"ContainerDied","Data":"6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded"} Nov 25 23:20:48 crc kubenswrapper[5045]: I1125 23:20:48.601352 5045 generic.go:334] "Generic (PLEG): container finished" podID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerID="6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e" exitCode=143 Nov 25 23:20:48 crc kubenswrapper[5045]: I1125 23:20:48.601385 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f965705-8dcd-412b-8850-1e3fba1e9a95","Type":"ContainerDied","Data":"6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e"} Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.613288 5045 generic.go:334] "Generic (PLEG): container finished" podID="52ab23f2-ab24-405e-8a1c-cdac875e433b" containerID="41f58a026e5fed932a73b18d41697680d94490d3152810196ebae7d402af2b47" exitCode=0 Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.613344 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"52ab23f2-ab24-405e-8a1c-cdac875e433b","Type":"ContainerDied","Data":"41f58a026e5fed932a73b18d41697680d94490d3152810196ebae7d402af2b47"} Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.716406 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:20:49 crc kubenswrapper[5045]: E1125 23:20:49.730348 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice/crio-889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.862097 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-combined-ca-bundle\") pod \"52ab23f2-ab24-405e-8a1c-cdac875e433b\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.862291 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqkf7\" (UniqueName: \"kubernetes.io/projected/52ab23f2-ab24-405e-8a1c-cdac875e433b-kube-api-access-pqkf7\") pod \"52ab23f2-ab24-405e-8a1c-cdac875e433b\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.862372 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-config-data\") pod \"52ab23f2-ab24-405e-8a1c-cdac875e433b\" (UID: \"52ab23f2-ab24-405e-8a1c-cdac875e433b\") " Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.868409 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52ab23f2-ab24-405e-8a1c-cdac875e433b-kube-api-access-pqkf7" (OuterVolumeSpecName: "kube-api-access-pqkf7") pod "52ab23f2-ab24-405e-8a1c-cdac875e433b" (UID: "52ab23f2-ab24-405e-8a1c-cdac875e433b"). InnerVolumeSpecName "kube-api-access-pqkf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.891457 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-config-data" (OuterVolumeSpecName: "config-data") pod "52ab23f2-ab24-405e-8a1c-cdac875e433b" (UID: "52ab23f2-ab24-405e-8a1c-cdac875e433b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.904311 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52ab23f2-ab24-405e-8a1c-cdac875e433b" (UID: "52ab23f2-ab24-405e-8a1c-cdac875e433b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.964494 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.964527 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqkf7\" (UniqueName: \"kubernetes.io/projected/52ab23f2-ab24-405e-8a1c-cdac875e433b-kube-api-access-pqkf7\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:49 crc kubenswrapper[5045]: I1125 23:20:49.964537 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ab23f2-ab24-405e-8a1c-cdac875e433b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.625447 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"52ab23f2-ab24-405e-8a1c-cdac875e433b","Type":"ContainerDied","Data":"2e79cd3e45219672233acce2d457e0710c5bd3e8dd5de8ed385bcb50e77cada7"} Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.625511 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.625836 5045 scope.go:117] "RemoveContainer" containerID="41f58a026e5fed932a73b18d41697680d94490d3152810196ebae7d402af2b47" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.653071 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.660626 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.675782 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:50 crc kubenswrapper[5045]: E1125 23:20:50.676186 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c385bb99-2964-4b34-b514-9d2c1a01f26d" containerName="nova-manage" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.676208 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c385bb99-2964-4b34-b514-9d2c1a01f26d" containerName="nova-manage" Nov 25 23:20:50 crc kubenswrapper[5045]: E1125 23:20:50.676242 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ab23f2-ab24-405e-8a1c-cdac875e433b" containerName="nova-scheduler-scheduler" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.676252 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ab23f2-ab24-405e-8a1c-cdac875e433b" containerName="nova-scheduler-scheduler" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.676468 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="52ab23f2-ab24-405e-8a1c-cdac875e433b" containerName="nova-scheduler-scheduler" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.676492 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c385bb99-2964-4b34-b514-9d2c1a01f26d" containerName="nova-manage" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.678020 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.682794 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.685454 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.778985 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fc80e12-1f82-458d-a8c1-4e7625a9381c-config-data\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.779057 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgmw4\" (UniqueName: \"kubernetes.io/projected/4fc80e12-1f82-458d-a8c1-4e7625a9381c-kube-api-access-pgmw4\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.779150 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc80e12-1f82-458d-a8c1-4e7625a9381c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.880942 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fc80e12-1f82-458d-a8c1-4e7625a9381c-config-data\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.881059 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgmw4\" (UniqueName: \"kubernetes.io/projected/4fc80e12-1f82-458d-a8c1-4e7625a9381c-kube-api-access-pgmw4\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.881272 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc80e12-1f82-458d-a8c1-4e7625a9381c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.887849 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc80e12-1f82-458d-a8c1-4e7625a9381c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.888003 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fc80e12-1f82-458d-a8c1-4e7625a9381c-config-data\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:50 crc kubenswrapper[5045]: I1125 23:20:50.931366 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgmw4\" (UniqueName: 
\"kubernetes.io/projected/4fc80e12-1f82-458d-a8c1-4e7625a9381c-kube-api-access-pgmw4\") pod \"nova-scheduler-0\" (UID: \"4fc80e12-1f82-458d-a8c1-4e7625a9381c\") " pod="openstack/nova-scheduler-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.002455 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.393860 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.443680 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.495350 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-combined-ca-bundle\") pod \"3f965705-8dcd-412b-8850-1e3fba1e9a95\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.495391 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-config-data\") pod \"3f965705-8dcd-412b-8850-1e3fba1e9a95\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.495436 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f965705-8dcd-412b-8850-1e3fba1e9a95-logs\") pod \"3f965705-8dcd-412b-8850-1e3fba1e9a95\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.495459 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2xvk\" (UniqueName: \"kubernetes.io/projected/3f965705-8dcd-412b-8850-1e3fba1e9a95-kube-api-access-h2xvk\") pod \"3f965705-8dcd-412b-8850-1e3fba1e9a95\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.495569 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-internal-tls-certs\") pod \"3f965705-8dcd-412b-8850-1e3fba1e9a95\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.495592 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-public-tls-certs\") pod \"3f965705-8dcd-412b-8850-1e3fba1e9a95\" (UID: \"3f965705-8dcd-412b-8850-1e3fba1e9a95\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.496666 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f965705-8dcd-412b-8850-1e3fba1e9a95-logs" (OuterVolumeSpecName: "logs") pod "3f965705-8dcd-412b-8850-1e3fba1e9a95" (UID: "3f965705-8dcd-412b-8850-1e3fba1e9a95"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.497825 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f965705-8dcd-412b-8850-1e3fba1e9a95-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.516833 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f965705-8dcd-412b-8850-1e3fba1e9a95-kube-api-access-h2xvk" (OuterVolumeSpecName: "kube-api-access-h2xvk") pod "3f965705-8dcd-412b-8850-1e3fba1e9a95" (UID: "3f965705-8dcd-412b-8850-1e3fba1e9a95"). InnerVolumeSpecName "kube-api-access-h2xvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.524839 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-config-data" (OuterVolumeSpecName: "config-data") pod "3f965705-8dcd-412b-8850-1e3fba1e9a95" (UID: "3f965705-8dcd-412b-8850-1e3fba1e9a95"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.550435 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f965705-8dcd-412b-8850-1e3fba1e9a95" (UID: "3f965705-8dcd-412b-8850-1e3fba1e9a95"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.551994 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3f965705-8dcd-412b-8850-1e3fba1e9a95" (UID: "3f965705-8dcd-412b-8850-1e3fba1e9a95"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.564157 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3f965705-8dcd-412b-8850-1e3fba1e9a95" (UID: "3f965705-8dcd-412b-8850-1e3fba1e9a95"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.592664 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 23:20:51 crc kubenswrapper[5045]: W1125 23:20:51.593920 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fc80e12_1f82_458d_a8c1_4e7625a9381c.slice/crio-1b3a253ce65a68c6cda2f9f80b40a2b7f0b9fa12f6c05c3926fabcc1f5aeb8db WatchSource:0}: Error finding container 1b3a253ce65a68c6cda2f9f80b40a2b7f0b9fa12f6c05c3926fabcc1f5aeb8db: Status 404 returned error can't find the container with id 1b3a253ce65a68c6cda2f9f80b40a2b7f0b9fa12f6c05c3926fabcc1f5aeb8db Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.598546 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-combined-ca-bundle\") pod \"1aa735ee-28a3-4e0e-8f53-f2baea365124\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.598618 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1aa735ee-28a3-4e0e-8f53-f2baea365124-logs\") pod \"1aa735ee-28a3-4e0e-8f53-f2baea365124\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.598652 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-nova-metadata-tls-certs\") pod \"1aa735ee-28a3-4e0e-8f53-f2baea365124\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.598756 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xg8c\" (UniqueName: \"kubernetes.io/projected/1aa735ee-28a3-4e0e-8f53-f2baea365124-kube-api-access-8xg8c\") pod \"1aa735ee-28a3-4e0e-8f53-f2baea365124\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.599115 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1aa735ee-28a3-4e0e-8f53-f2baea365124-logs" (OuterVolumeSpecName: "logs") pod "1aa735ee-28a3-4e0e-8f53-f2baea365124" (UID: "1aa735ee-28a3-4e0e-8f53-f2baea365124"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.599221 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-config-data\") pod \"1aa735ee-28a3-4e0e-8f53-f2baea365124\" (UID: \"1aa735ee-28a3-4e0e-8f53-f2baea365124\") " Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.599732 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1aa735ee-28a3-4e0e-8f53-f2baea365124-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.599748 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.599760 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.599768 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2xvk\" (UniqueName: \"kubernetes.io/projected/3f965705-8dcd-412b-8850-1e3fba1e9a95-kube-api-access-h2xvk\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.599779 5045 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.599787 5045 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f965705-8dcd-412b-8850-1e3fba1e9a95-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.601799 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aa735ee-28a3-4e0e-8f53-f2baea365124-kube-api-access-8xg8c" (OuterVolumeSpecName: "kube-api-access-8xg8c") pod "1aa735ee-28a3-4e0e-8f53-f2baea365124" (UID: "1aa735ee-28a3-4e0e-8f53-f2baea365124"). InnerVolumeSpecName "kube-api-access-8xg8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.619614 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1aa735ee-28a3-4e0e-8f53-f2baea365124" (UID: "1aa735ee-28a3-4e0e-8f53-f2baea365124"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.624206 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-config-data" (OuterVolumeSpecName: "config-data") pod "1aa735ee-28a3-4e0e-8f53-f2baea365124" (UID: "1aa735ee-28a3-4e0e-8f53-f2baea365124"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.634369 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4fc80e12-1f82-458d-a8c1-4e7625a9381c","Type":"ContainerStarted","Data":"1b3a253ce65a68c6cda2f9f80b40a2b7f0b9fa12f6c05c3926fabcc1f5aeb8db"} Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.636419 5045 generic.go:334] "Generic (PLEG): container finished" podID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerID="38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa" exitCode=0 Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.636459 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1aa735ee-28a3-4e0e-8f53-f2baea365124","Type":"ContainerDied","Data":"38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa"} Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.636511 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1aa735ee-28a3-4e0e-8f53-f2baea365124","Type":"ContainerDied","Data":"779c640e5721f199c5c25b3798d9db0cc2817a4dc03db1a487d2ba7a0ff86b94"} Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.636533 5045 scope.go:117] "RemoveContainer" containerID="38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.636546 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.639668 5045 generic.go:334] "Generic (PLEG): container finished" podID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerID="e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963" exitCode=0 Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.639703 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f965705-8dcd-412b-8850-1e3fba1e9a95","Type":"ContainerDied","Data":"e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963"} Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.639750 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f965705-8dcd-412b-8850-1e3fba1e9a95","Type":"ContainerDied","Data":"364b0bffe72726a22c4466c7c4bccdbe054b97df0318e44e0b42699bf4a592b0"} Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.639788 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.655524 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "1aa735ee-28a3-4e0e-8f53-f2baea365124" (UID: "1aa735ee-28a3-4e0e-8f53-f2baea365124"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.658666 5045 scope.go:117] "RemoveContainer" containerID="6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.691636 5045 scope.go:117] "RemoveContainer" containerID="38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa" Nov 25 23:20:51 crc kubenswrapper[5045]: E1125 23:20:51.693461 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa\": container with ID starting with 38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa not found: ID does not exist" containerID="38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.693504 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa"} err="failed to get container status \"38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa\": rpc error: code = NotFound desc = could not find container \"38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa\": container with ID starting with 38d8282b9e42d29e7bf4a2bea6ecb31922b0d70dd440b9a488927bb8cfea82fa not found: ID does not exist" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.693529 5045 scope.go:117] "RemoveContainer" containerID="6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded" Nov 25 23:20:51 crc kubenswrapper[5045]: E1125 23:20:51.693944 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded\": container with ID starting with 6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded not found: ID does not exist" containerID="6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.693984 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded"} err="failed to get container status \"6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded\": rpc error: code = NotFound desc = could not find container \"6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded\": container with ID starting with 6098da21ba788204646680dbb9540eaa2bb58907c3e38b3b3bd6799cf5396ded not found: ID does not exist" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.694030 5045 scope.go:117] "RemoveContainer" containerID="e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.701220 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.701576 5045 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.701614 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xg8c\" (UniqueName: 
\"kubernetes.io/projected/1aa735ee-28a3-4e0e-8f53-f2baea365124-kube-api-access-8xg8c\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.701628 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.701641 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa735ee-28a3-4e0e-8f53-f2baea365124-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.711816 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.719217 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 23:20:51 crc kubenswrapper[5045]: E1125 23:20:51.719672 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-metadata" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.719694 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-metadata" Nov 25 23:20:51 crc kubenswrapper[5045]: E1125 23:20:51.719736 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-log" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.719748 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-log" Nov 25 23:20:51 crc kubenswrapper[5045]: E1125 23:20:51.719778 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-api" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.719785 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-api" Nov 25 23:20:51 crc kubenswrapper[5045]: E1125 23:20:51.719809 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-log" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.719816 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-log" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.720045 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-metadata" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.720061 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-api" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.720092 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" containerName="nova-api-log" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.720107 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" containerName="nova-metadata-log" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.721518 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.725895 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.726383 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.726624 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.728069 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.735137 5045 scope.go:117] "RemoveContainer" containerID="6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.761995 5045 scope.go:117] "RemoveContainer" containerID="e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963" Nov 25 23:20:51 crc kubenswrapper[5045]: E1125 23:20:51.762558 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963\": container with ID starting with e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963 not found: ID does not exist" containerID="e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.762644 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963"} err="failed to get container status \"e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963\": rpc error: code = NotFound desc = could not find container \"e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963\": container with ID starting with e1bec8e9279c430ec85212775f4274978ee40ccc39244d1912451ff478292963 not found: ID does not exist" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.762672 5045 scope.go:117] "RemoveContainer" containerID="6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e" Nov 25 23:20:51 crc kubenswrapper[5045]: E1125 23:20:51.762962 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e\": container with ID starting with 6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e not found: ID does not exist" containerID="6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.763009 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e"} err="failed to get container status \"6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e\": rpc error: code = NotFound desc = could not find container \"6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e\": container with ID starting with 6f4713845b4f8892869645c77d801cfecc0e2a87553c7e91a5fd85d56e8e1e6e not found: ID does not exist" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.905396 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-config-data\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.905633 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8c9g\" (UniqueName: \"kubernetes.io/projected/65d3b56d-211c-4392-b6ee-449e68d546a0-kube-api-access-f8c9g\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.905723 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-public-tls-certs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.905842 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.905930 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65d3b56d-211c-4392-b6ee-449e68d546a0-logs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:51 crc kubenswrapper[5045]: I1125 23:20:51.906084 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.007453 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-config-data\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.007827 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8c9g\" (UniqueName: \"kubernetes.io/projected/65d3b56d-211c-4392-b6ee-449e68d546a0-kube-api-access-f8c9g\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.007914 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-public-tls-certs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.008424 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.008515 5045 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65d3b56d-211c-4392-b6ee-449e68d546a0-logs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.008646 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.009002 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65d3b56d-211c-4392-b6ee-449e68d546a0-logs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.012197 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-config-data\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.014654 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.015549 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.022113 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.023531 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65d3b56d-211c-4392-b6ee-449e68d546a0-public-tls-certs\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.038602 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.041027 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8c9g\" (UniqueName: \"kubernetes.io/projected/65d3b56d-211c-4392-b6ee-449e68d546a0-kube-api-access-f8c9g\") pod \"nova-api-0\" (UID: \"65d3b56d-211c-4392-b6ee-449e68d546a0\") " pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.047725 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.049086 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.057030 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.057889 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.058982 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.110826 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr68z\" (UniqueName: \"kubernetes.io/projected/4af60a2e-c5d2-4f99-912d-8c269561a2e0-kube-api-access-cr68z\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.110958 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-config-data\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.111038 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.111075 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.111111 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4af60a2e-c5d2-4f99-912d-8c269561a2e0-logs\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.212592 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-config-data\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.212769 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.212824 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " 
pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.212880 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4af60a2e-c5d2-4f99-912d-8c269561a2e0-logs\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.212986 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr68z\" (UniqueName: \"kubernetes.io/projected/4af60a2e-c5d2-4f99-912d-8c269561a2e0-kube-api-access-cr68z\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.214036 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4af60a2e-c5d2-4f99-912d-8c269561a2e0-logs\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.216055 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.217267 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.218234 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af60a2e-c5d2-4f99-912d-8c269561a2e0-config-data\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.240677 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr68z\" (UniqueName: \"kubernetes.io/projected/4af60a2e-c5d2-4f99-912d-8c269561a2e0-kube-api-access-cr68z\") pod \"nova-metadata-0\" (UID: \"4af60a2e-c5d2-4f99-912d-8c269561a2e0\") " pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.339572 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.421531 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1aa735ee-28a3-4e0e-8f53-f2baea365124" path="/var/lib/kubelet/pods/1aa735ee-28a3-4e0e-8f53-f2baea365124/volumes" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.422866 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f965705-8dcd-412b-8850-1e3fba1e9a95" path="/var/lib/kubelet/pods/3f965705-8dcd-412b-8850-1e3fba1e9a95/volumes" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.423910 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52ab23f2-ab24-405e-8a1c-cdac875e433b" path="/var/lib/kubelet/pods/52ab23f2-ab24-405e-8a1c-cdac875e433b/volumes" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.425973 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.660816 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4fc80e12-1f82-458d-a8c1-4e7625a9381c","Type":"ContainerStarted","Data":"16a3a4b1ffd4695c59d7a57557dabfc96b5ec4a108cc352ce9e834c5e012b941"} Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.691359 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.6913382930000003 podStartE2EDuration="2.691338293s" podCreationTimestamp="2025-11-25 23:20:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:52.68683129 +0000 UTC m=+1309.044490402" watchObservedRunningTime="2025-11-25 23:20:52.691338293 +0000 UTC m=+1309.048997405" Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.878933 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 23:20:52 crc kubenswrapper[5045]: I1125 23:20:52.974134 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 23:20:52 crc kubenswrapper[5045]: W1125 23:20:52.984193 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4af60a2e_c5d2_4f99_912d_8c269561a2e0.slice/crio-9b9e46fb5e9170112390210b44a5743efa9df80b10751d550899f4095abd0aed WatchSource:0}: Error finding container 9b9e46fb5e9170112390210b44a5743efa9df80b10751d550899f4095abd0aed: Status 404 returned error can't find the container with id 9b9e46fb5e9170112390210b44a5743efa9df80b10751d550899f4095abd0aed Nov 25 23:20:53 crc kubenswrapper[5045]: I1125 23:20:53.677298 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"65d3b56d-211c-4392-b6ee-449e68d546a0","Type":"ContainerStarted","Data":"b028fee215f9e115965dfc3068120b9ca13a8e0e928c090790eb7684e4513244"} Nov 25 23:20:53 crc kubenswrapper[5045]: I1125 23:20:53.677551 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"65d3b56d-211c-4392-b6ee-449e68d546a0","Type":"ContainerStarted","Data":"25811497f9c5fb50bbc3b2ec6217d649cce3fc75cc5fceff97e02b605ff70de6"} Nov 25 23:20:53 crc kubenswrapper[5045]: I1125 23:20:53.677563 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"65d3b56d-211c-4392-b6ee-449e68d546a0","Type":"ContainerStarted","Data":"d0324e8e462a296853abc3bd51c7873a9e9ec1039496a8f7d41d5e4a7c583296"} Nov 25 23:20:53 crc kubenswrapper[5045]: I1125 23:20:53.681076 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4af60a2e-c5d2-4f99-912d-8c269561a2e0","Type":"ContainerStarted","Data":"8c2b29d569a7e22c7b2a193559f9591c01ac9507240e8853ddfb979f3d5a989e"} Nov 25 23:20:53 crc kubenswrapper[5045]: I1125 23:20:53.681106 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4af60a2e-c5d2-4f99-912d-8c269561a2e0","Type":"ContainerStarted","Data":"ecde81920117250bd5a43dd8e136fb484366b1010afb9cf2e94993a4072140c7"} Nov 25 23:20:53 crc kubenswrapper[5045]: I1125 23:20:53.681116 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4af60a2e-c5d2-4f99-912d-8c269561a2e0","Type":"ContainerStarted","Data":"9b9e46fb5e9170112390210b44a5743efa9df80b10751d550899f4095abd0aed"} Nov 25 23:20:53 crc kubenswrapper[5045]: I1125 23:20:53.735603 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.735575198 podStartE2EDuration="1.735575198s" podCreationTimestamp="2025-11-25 23:20:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:53.729489409 +0000 UTC m=+1310.087148561" watchObservedRunningTime="2025-11-25 23:20:53.735575198 +0000 UTC m=+1310.093234350" Nov 25 23:20:53 crc kubenswrapper[5045]: I1125 23:20:53.766564 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.766539111 podStartE2EDuration="2.766539111s" podCreationTimestamp="2025-11-25 23:20:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:20:53.703970337 +0000 UTC m=+1310.061629469" watchObservedRunningTime="2025-11-25 23:20:53.766539111 +0000 UTC m=+1310.124198223" Nov 25 23:20:56 crc kubenswrapper[5045]: I1125 23:20:56.003218 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 23:20:57 crc kubenswrapper[5045]: I1125 23:20:57.426993 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 23:20:57 crc kubenswrapper[5045]: I1125 23:20:57.427061 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 23:21:00 crc kubenswrapper[5045]: E1125 23:21:00.015955 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83215ee2_350f_4838_9973_6a8417a491d0.slice/crio-889b657a85e05fd36b37e3817014bb257099861292d346268b752ce369175e46\": RecentStats: unable to find data in memory cache]" Nov 25 23:21:00 crc kubenswrapper[5045]: I1125 23:21:00.541194 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 25 23:21:00 crc kubenswrapper[5045]: I1125 23:21:00.541318 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:21:00 crc kubenswrapper[5045]: I1125 23:21:00.822592 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 23:21:01 crc kubenswrapper[5045]: I1125 23:21:01.003790 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 23:21:01 crc kubenswrapper[5045]: I1125 23:21:01.037757 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 23:21:01 crc kubenswrapper[5045]: I1125 23:21:01.822181 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 23:21:02 crc kubenswrapper[5045]: I1125 23:21:02.341342 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 23:21:02 crc kubenswrapper[5045]: I1125 23:21:02.341392 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 23:21:02 crc kubenswrapper[5045]: I1125 23:21:02.427194 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 23:21:02 crc kubenswrapper[5045]: I1125 23:21:02.427292 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 23:21:03 crc kubenswrapper[5045]: I1125 23:21:03.356905 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="65d3b56d-211c-4392-b6ee-449e68d546a0" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.186:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 23:21:03 crc kubenswrapper[5045]: I1125 23:21:03.359453 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="65d3b56d-211c-4392-b6ee-449e68d546a0" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.186:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 23:21:03 crc kubenswrapper[5045]: I1125 23:21:03.444921 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4af60a2e-c5d2-4f99-912d-8c269561a2e0" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.187:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 23:21:03 crc kubenswrapper[5045]: I1125 23:21:03.444912 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4af60a2e-c5d2-4f99-912d-8c269561a2e0" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.187:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 23:21:04 crc kubenswrapper[5045]: E1125 23:21:04.458766 5045 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/94c6ec757458519afbeda82c8c61538695e619c613d2e9ae118cddd128444abd/diff" to get inode usage: stat 
/var/lib/containers/storage/overlay/94c6ec757458519afbeda82c8c61538695e619c613d2e9ae118cddd128444abd/diff: no such file or directory, extraDiskErr: Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.351150 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.352305 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.354349 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.363794 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.432615 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.442168 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.444063 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.914091 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.923457 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 23:21:12 crc kubenswrapper[5045]: I1125 23:21:12.925525 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 23:21:21 crc kubenswrapper[5045]: I1125 23:21:21.174766 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 23:21:22 crc kubenswrapper[5045]: I1125 23:21:22.119886 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 23:21:25 crc kubenswrapper[5045]: I1125 23:21:25.159315 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="7fac1a35-2303-42d7-b27b-410ecff1b89a" containerName="rabbitmq" containerID="cri-o://108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586" gracePeriod=604797 Nov 25 23:21:26 crc kubenswrapper[5045]: I1125 23:21:26.442086 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" containerName="rabbitmq" containerID="cri-o://ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e" gracePeriod=604796 Nov 25 23:21:30 crc kubenswrapper[5045]: I1125 23:21:30.540661 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:21:30 crc kubenswrapper[5045]: I1125 23:21:30.541315 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.809944 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.953576 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-tls\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.953864 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.953899 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-server-conf\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.954001 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-plugins-conf\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.954018 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-config-data\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.954053 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-plugins\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.954072 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-confd\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.954114 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22bsm\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-kube-api-access-22bsm\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.954143 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7fac1a35-2303-42d7-b27b-410ecff1b89a-erlang-cookie-secret\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.954161 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-erlang-cookie\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.954283 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7fac1a35-2303-42d7-b27b-410ecff1b89a-pod-info\") pod \"7fac1a35-2303-42d7-b27b-410ecff1b89a\" (UID: \"7fac1a35-2303-42d7-b27b-410ecff1b89a\") " Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.955584 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.955788 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.956578 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.960416 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.961761 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.963581 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-kube-api-access-22bsm" (OuterVolumeSpecName: "kube-api-access-22bsm") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "kube-api-access-22bsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.963887 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fac1a35-2303-42d7-b27b-410ecff1b89a-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). 
InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.976005 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/7fac1a35-2303-42d7-b27b-410ecff1b89a-pod-info" (OuterVolumeSpecName: "pod-info") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 23:21:31 crc kubenswrapper[5045]: I1125 23:21:31.989467 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-config-data" (OuterVolumeSpecName: "config-data") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.049955 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-server-conf" (OuterVolumeSpecName: "server-conf") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056784 5045 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056831 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056845 5045 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056858 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22bsm\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-kube-api-access-22bsm\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056872 5045 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7fac1a35-2303-42d7-b27b-410ecff1b89a-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056883 5045 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056893 5045 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7fac1a35-2303-42d7-b27b-410ecff1b89a-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056903 5045 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-tls\") on node \"crc\" DevicePath 
\"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056937 5045 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.056948 5045 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7fac1a35-2303-42d7-b27b-410ecff1b89a-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.078165 5045 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.101035 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "7fac1a35-2303-42d7-b27b-410ecff1b89a" (UID: "7fac1a35-2303-42d7-b27b-410ecff1b89a"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.119105 5045 generic.go:334] "Generic (PLEG): container finished" podID="7fac1a35-2303-42d7-b27b-410ecff1b89a" containerID="108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586" exitCode=0 Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.119149 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.119168 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7fac1a35-2303-42d7-b27b-410ecff1b89a","Type":"ContainerDied","Data":"108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586"} Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.119623 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7fac1a35-2303-42d7-b27b-410ecff1b89a","Type":"ContainerDied","Data":"0635c2263e6920e46260d89e8f63c1856a28d36e451f5b0f65a6bac32eeebb4c"} Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.119654 5045 scope.go:117] "RemoveContainer" containerID="108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.142618 5045 scope.go:117] "RemoveContainer" containerID="08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.158689 5045 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.158730 5045 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7fac1a35-2303-42d7-b27b-410ecff1b89a-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.161326 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.170240 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.186484 5045 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/rabbitmq-server-0"] Nov 25 23:21:32 crc kubenswrapper[5045]: E1125 23:21:32.186807 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fac1a35-2303-42d7-b27b-410ecff1b89a" containerName="setup-container" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.186820 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fac1a35-2303-42d7-b27b-410ecff1b89a" containerName="setup-container" Nov 25 23:21:32 crc kubenswrapper[5045]: E1125 23:21:32.186835 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fac1a35-2303-42d7-b27b-410ecff1b89a" containerName="rabbitmq" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.186840 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fac1a35-2303-42d7-b27b-410ecff1b89a" containerName="rabbitmq" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.187006 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fac1a35-2303-42d7-b27b-410ecff1b89a" containerName="rabbitmq" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.187810 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.192580 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.192633 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.192738 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.192813 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.192938 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.195992 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.196334 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-hdhzj" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.199206 5045 scope.go:117] "RemoveContainer" containerID="108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586" Nov 25 23:21:32 crc kubenswrapper[5045]: E1125 23:21:32.199701 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586\": container with ID starting with 108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586 not found: ID does not exist" containerID="108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.199758 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586"} err="failed to get container status \"108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586\": rpc error: code = NotFound desc = could not find container \"108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586\": container with ID starting with 
108bbe3c7f2336095b7729d5f6a4df78e7574816cf9e9b31981b21af2743f586 not found: ID does not exist" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.199779 5045 scope.go:117] "RemoveContainer" containerID="08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d" Nov 25 23:21:32 crc kubenswrapper[5045]: E1125 23:21:32.206439 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d\": container with ID starting with 08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d not found: ID does not exist" containerID="08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.206478 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d"} err="failed to get container status \"08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d\": rpc error: code = NotFound desc = could not find container \"08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d\": container with ID starting with 08bb8b30e0afb480d73c05026da488ee13cf6aa9849f2d7245739149bf2c503d not found: ID does not exist" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.247691 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.261767 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.261887 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.261935 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.261991 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.262017 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/73cc82f8-4d6b-4608-9881-664a8194fc6f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.262092 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.262120 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.262148 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-config-data\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.262164 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.262178 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghl5v\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-kube-api-access-ghl5v\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.262204 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/73cc82f8-4d6b-4608-9881-664a8194fc6f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364559 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364640 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364680 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364705 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/73cc82f8-4d6b-4608-9881-664a8194fc6f-pod-info\") pod \"rabbitmq-server-0\" (UID: 
\"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364805 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364842 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364883 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-config-data\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364909 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364931 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghl5v\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-kube-api-access-ghl5v\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364965 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/73cc82f8-4d6b-4608-9881-664a8194fc6f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.364995 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.365298 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.365690 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.369047 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.369142 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.369817 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.372440 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.372740 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/73cc82f8-4d6b-4608-9881-664a8194fc6f-config-data\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.373104 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/73cc82f8-4d6b-4608-9881-664a8194fc6f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.374686 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.375845 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/73cc82f8-4d6b-4608-9881-664a8194fc6f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.389376 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghl5v\" (UniqueName: \"kubernetes.io/projected/73cc82f8-4d6b-4608-9881-664a8194fc6f-kube-api-access-ghl5v\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.394824 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"73cc82f8-4d6b-4608-9881-664a8194fc6f\") " pod="openstack/rabbitmq-server-0" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.406121 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="7fac1a35-2303-42d7-b27b-410ecff1b89a" path="/var/lib/kubelet/pods/7fac1a35-2303-42d7-b27b-410ecff1b89a/volumes" Nov 25 23:21:32 crc kubenswrapper[5045]: I1125 23:21:32.508986 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.070820 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.123130 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.132732 5045 generic.go:334] "Generic (PLEG): container finished" podID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" containerID="ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e" exitCode=0 Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.132801 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95","Type":"ContainerDied","Data":"ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e"} Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.132823 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.132842 5045 scope.go:117] "RemoveContainer" containerID="ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.132829 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95","Type":"ContainerDied","Data":"b676b9f3e241789bdc5fe601c3a249dc871c4b34599754deaa2c8ead95e17f07"} Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.138465 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"73cc82f8-4d6b-4608-9881-664a8194fc6f","Type":"ContainerStarted","Data":"84a80609421748508b3c5308f733c8bd5d138ef7849b03cd3c30adeaa7e802dc"} Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.179685 5045 scope.go:117] "RemoveContainer" containerID="c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.190943 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-plugins\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191461 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191557 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-confd\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191582 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-tls\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191620 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-config-data\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191646 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191707 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-pod-info\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191753 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-erlang-cookie-secret\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191779 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-erlang-cookie\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191807 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-server-conf\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191842 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ksmb\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-kube-api-access-8ksmb\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.191911 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-plugins-conf\") pod \"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\" (UID: 
\"b33fb38e-e7c7-4bb6-92ee-f98e45e71a95\") " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.192302 5045 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.192684 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.193438 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.199416 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.200822 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-kube-api-access-8ksmb" (OuterVolumeSpecName: "kube-api-access-8ksmb") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "kube-api-access-8ksmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.202120 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.206663 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.206903 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-pod-info" (OuterVolumeSpecName: "pod-info") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.213185 5045 scope.go:117] "RemoveContainer" containerID="ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e" Nov 25 23:21:33 crc kubenswrapper[5045]: E1125 23:21:33.213560 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e\": container with ID starting with ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e not found: ID does not exist" containerID="ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.213591 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e"} err="failed to get container status \"ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e\": rpc error: code = NotFound desc = could not find container \"ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e\": container with ID starting with ef19997e431b7b51dc41393e77173873feaaa537886ee4ea8bc5516637ecbe1e not found: ID does not exist" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.213613 5045 scope.go:117] "RemoveContainer" containerID="c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48" Nov 25 23:21:33 crc kubenswrapper[5045]: E1125 23:21:33.213900 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48\": container with ID starting with c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48 not found: ID does not exist" containerID="c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.213924 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48"} err="failed to get container status \"c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48\": rpc error: code = NotFound desc = could not find container \"c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48\": container with ID starting with c4c8fbce961154e8c88d1959873b9c4f63603220c3203632dd1b0f746dab6d48 not found: ID does not exist" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.221974 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-config-data" (OuterVolumeSpecName: "config-data") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.254463 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-server-conf" (OuterVolumeSpecName: "server-conf") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.293985 5045 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.294027 5045 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.294039 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.294069 5045 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.294082 5045 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.294094 5045 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.294108 5045 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.294120 5045 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.294131 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ksmb\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-kube-api-access-8ksmb\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.300668 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" (UID: "b33fb38e-e7c7-4bb6-92ee-f98e45e71a95"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.322130 5045 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.395972 5045 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.396662 5045 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.532654 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.540162 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.563017 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 23:21:33 crc kubenswrapper[5045]: E1125 23:21:33.563466 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" containerName="setup-container" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.563481 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" containerName="setup-container" Nov 25 23:21:33 crc kubenswrapper[5045]: E1125 23:21:33.563497 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" containerName="rabbitmq" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.563503 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" containerName="rabbitmq" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.563670 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" containerName="rabbitmq" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.564555 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.567190 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-g7cld" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.569600 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.569701 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.569853 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.570756 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.570809 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.570948 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.582499 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701649 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/74ac97d4-b89e-47c2-b249-9e70da06d165-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701749 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/74ac97d4-b89e-47c2-b249-9e70da06d165-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701779 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701805 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9d7s\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-kube-api-access-q9d7s\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701830 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701852 5045 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701873 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701897 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701944 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.701978 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.702001 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.803555 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/74ac97d4-b89e-47c2-b249-9e70da06d165-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.803616 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.803651 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9d7s\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-kube-api-access-q9d7s\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.803682 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.803752 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.804362 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.804406 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.804484 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.804526 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.804554 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.804585 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/74ac97d4-b89e-47c2-b249-9e70da06d165-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.805100 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.805122 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.805142 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.805256 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.805472 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.806355 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/74ac97d4-b89e-47c2-b249-9e70da06d165-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.809498 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/74ac97d4-b89e-47c2-b249-9e70da06d165-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.811986 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/74ac97d4-b89e-47c2-b249-9e70da06d165-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.813133 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.817421 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.837179 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9d7s\" (UniqueName: \"kubernetes.io/projected/74ac97d4-b89e-47c2-b249-9e70da06d165-kube-api-access-q9d7s\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.847419 5045 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"74ac97d4-b89e-47c2-b249-9e70da06d165\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:33 crc kubenswrapper[5045]: I1125 23:21:33.883064 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:21:34 crc kubenswrapper[5045]: I1125 23:21:34.424650 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b33fb38e-e7c7-4bb6-92ee-f98e45e71a95" path="/var/lib/kubelet/pods/b33fb38e-e7c7-4bb6-92ee-f98e45e71a95/volumes" Nov 25 23:21:34 crc kubenswrapper[5045]: W1125 23:21:34.429610 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74ac97d4_b89e_47c2_b249_9e70da06d165.slice/crio-4e3ff7437569361cebb83f5e99ec37aa690579dd6d7b2b42999ea1487abee5e1 WatchSource:0}: Error finding container 4e3ff7437569361cebb83f5e99ec37aa690579dd6d7b2b42999ea1487abee5e1: Status 404 returned error can't find the container with id 4e3ff7437569361cebb83f5e99ec37aa690579dd6d7b2b42999ea1487abee5e1 Nov 25 23:21:34 crc kubenswrapper[5045]: I1125 23:21:34.436677 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 23:21:35 crc kubenswrapper[5045]: I1125 23:21:35.162039 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"74ac97d4-b89e-47c2-b249-9e70da06d165","Type":"ContainerStarted","Data":"4e3ff7437569361cebb83f5e99ec37aa690579dd6d7b2b42999ea1487abee5e1"} Nov 25 23:21:35 crc kubenswrapper[5045]: I1125 23:21:35.165073 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"73cc82f8-4d6b-4608-9881-664a8194fc6f","Type":"ContainerStarted","Data":"ff5561252e4730118f39a085c527b64e1b4114217927d57eb044666bc57df9ba"} Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.141558 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-zsljl"] Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.143130 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.145950 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.155431 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-config\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.155475 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-sb\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.155576 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-dns-svc\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.155768 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.155936 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.156091 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4q2r\" (UniqueName: \"kubernetes.io/projected/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-kube-api-access-t4q2r\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.223634 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-zsljl"] Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.257499 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.257604 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: 
\"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.257664 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4q2r\" (UniqueName: \"kubernetes.io/projected/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-kube-api-access-t4q2r\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.257700 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-config\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.257746 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-sb\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.257800 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-dns-svc\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.258761 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-dns-svc\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.259383 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.259936 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.260325 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-config\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.260566 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-sb\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: 
I1125 23:21:36.277449 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4q2r\" (UniqueName: \"kubernetes.io/projected/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-kube-api-access-t4q2r\") pod \"dnsmasq-dns-578b8d767c-zsljl\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.463306 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:36 crc kubenswrapper[5045]: I1125 23:21:36.946059 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-zsljl"] Nov 25 23:21:36 crc kubenswrapper[5045]: W1125 23:21:36.948310 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb21f8b8b_fdc3_4de2_a7b8_c69053633a9b.slice/crio-6af29b454ba229f2874e6b1a4310075cc497cb6ed0662582102da1f4a010a663 WatchSource:0}: Error finding container 6af29b454ba229f2874e6b1a4310075cc497cb6ed0662582102da1f4a010a663: Status 404 returned error can't find the container with id 6af29b454ba229f2874e6b1a4310075cc497cb6ed0662582102da1f4a010a663 Nov 25 23:21:37 crc kubenswrapper[5045]: I1125 23:21:37.183466 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"74ac97d4-b89e-47c2-b249-9e70da06d165","Type":"ContainerStarted","Data":"4beb97a9bc6b3966a1cfd3dd8a825b0a55818228d83a6ed74400277870945a7f"} Nov 25 23:21:37 crc kubenswrapper[5045]: I1125 23:21:37.186363 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" event={"ID":"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b","Type":"ContainerStarted","Data":"6af29b454ba229f2874e6b1a4310075cc497cb6ed0662582102da1f4a010a663"} Nov 25 23:21:38 crc kubenswrapper[5045]: I1125 23:21:38.195679 5045 generic.go:334] "Generic (PLEG): container finished" podID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" containerID="eb2e46883eca681d8293295cb43ee70432bb070de1059dd5c39cdaf5a810c46d" exitCode=0 Nov 25 23:21:38 crc kubenswrapper[5045]: I1125 23:21:38.195801 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" event={"ID":"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b","Type":"ContainerDied","Data":"eb2e46883eca681d8293295cb43ee70432bb070de1059dd5c39cdaf5a810c46d"} Nov 25 23:21:39 crc kubenswrapper[5045]: I1125 23:21:39.214341 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" event={"ID":"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b","Type":"ContainerStarted","Data":"7b9d8b23c6045da47c798bd3dc98cfc20221935f0c5f9be97f82de8298563bab"} Nov 25 23:21:39 crc kubenswrapper[5045]: I1125 23:21:39.215270 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:39 crc kubenswrapper[5045]: I1125 23:21:39.247321 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" podStartSLOduration=3.24729571 podStartE2EDuration="3.24729571s" podCreationTimestamp="2025-11-25 23:21:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:21:39.245420455 +0000 UTC m=+1355.603079597" watchObservedRunningTime="2025-11-25 23:21:39.24729571 +0000 UTC m=+1355.604954852" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 
23:21:46.465877 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.547465 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-l48zw"] Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.547748 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" podUID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" containerName="dnsmasq-dns" containerID="cri-o://f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3" gracePeriod=10 Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.713218 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-c924c"] Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.715133 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.729088 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-c924c"] Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.875766 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.875833 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.875894 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.875983 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sw7jv\" (UniqueName: \"kubernetes.io/projected/760e1230-bac9-431a-9ee5-ead3870b87e7-kube-api-access-sw7jv\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.876226 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.876360 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-config\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " 
pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.981804 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.981888 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.981941 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.981968 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sw7jv\" (UniqueName: \"kubernetes.io/projected/760e1230-bac9-431a-9ee5-ead3870b87e7-kube-api-access-sw7jv\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.982040 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.982110 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-config\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.982775 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.982838 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.983306 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-config\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.983527 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:46 crc kubenswrapper[5045]: I1125 23:21:46.985862 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.005695 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sw7jv\" (UniqueName: \"kubernetes.io/projected/760e1230-bac9-431a-9ee5-ead3870b87e7-kube-api-access-sw7jv\") pod \"dnsmasq-dns-fbc59fbb7-c924c\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.034496 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.126307 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.289300 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-config\") pod \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.289374 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xxlw\" (UniqueName: \"kubernetes.io/projected/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-kube-api-access-9xxlw\") pod \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.289404 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-nb\") pod \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.289454 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-sb\") pod \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.289471 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-dns-svc\") pod \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\" (UID: \"4d447d31-1ecf-4836-b96f-ebcde66a1cbd\") " Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.305789 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-kube-api-access-9xxlw" (OuterVolumeSpecName: "kube-api-access-9xxlw") pod "4d447d31-1ecf-4836-b96f-ebcde66a1cbd" (UID: 
"4d447d31-1ecf-4836-b96f-ebcde66a1cbd"). InnerVolumeSpecName "kube-api-access-9xxlw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.330997 5045 generic.go:334] "Generic (PLEG): container finished" podID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" containerID="f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3" exitCode=0 Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.331122 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" event={"ID":"4d447d31-1ecf-4836-b96f-ebcde66a1cbd","Type":"ContainerDied","Data":"f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3"} Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.331181 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" event={"ID":"4d447d31-1ecf-4836-b96f-ebcde66a1cbd","Type":"ContainerDied","Data":"ca6cfe57fa22665c92d6c752f7c458885341b5b61a12595266cae83ccb14e42c"} Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.331203 5045 scope.go:117] "RemoveContainer" containerID="f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.331434 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-l48zw" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.344763 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4d447d31-1ecf-4836-b96f-ebcde66a1cbd" (UID: "4d447d31-1ecf-4836-b96f-ebcde66a1cbd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.353699 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4d447d31-1ecf-4836-b96f-ebcde66a1cbd" (UID: "4d447d31-1ecf-4836-b96f-ebcde66a1cbd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.355660 5045 scope.go:117] "RemoveContainer" containerID="d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.358478 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4d447d31-1ecf-4836-b96f-ebcde66a1cbd" (UID: "4d447d31-1ecf-4836-b96f-ebcde66a1cbd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.363495 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-config" (OuterVolumeSpecName: "config") pod "4d447d31-1ecf-4836-b96f-ebcde66a1cbd" (UID: "4d447d31-1ecf-4836-b96f-ebcde66a1cbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.380697 5045 scope.go:117] "RemoveContainer" containerID="f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3" Nov 25 23:21:47 crc kubenswrapper[5045]: E1125 23:21:47.381160 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3\": container with ID starting with f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3 not found: ID does not exist" containerID="f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.381215 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3"} err="failed to get container status \"f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3\": rpc error: code = NotFound desc = could not find container \"f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3\": container with ID starting with f52182ddf9063ebe49fb12f49807bbabe4421c85e00736512bc14b85c6e649a3 not found: ID does not exist" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.381243 5045 scope.go:117] "RemoveContainer" containerID="d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879" Nov 25 23:21:47 crc kubenswrapper[5045]: E1125 23:21:47.381576 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879\": container with ID starting with d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879 not found: ID does not exist" containerID="d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.381629 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879"} err="failed to get container status \"d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879\": rpc error: code = NotFound desc = could not find container \"d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879\": container with ID starting with d621c0f0b00602c34bf528be0d1d539fc54111ca95527567d162787db93e8879 not found: ID does not exist" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.391606 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xxlw\" (UniqueName: \"kubernetes.io/projected/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-kube-api-access-9xxlw\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.391636 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.391645 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.391654 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.391662 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d447d31-1ecf-4836-b96f-ebcde66a1cbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.508342 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-c924c"] Nov 25 23:21:47 crc kubenswrapper[5045]: W1125 23:21:47.512050 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod760e1230_bac9_431a_9ee5_ead3870b87e7.slice/crio-ba10a8e43538fc54158a1b71bc8f96f4a5388ed9f1d49997aab7d31c02537678 WatchSource:0}: Error finding container ba10a8e43538fc54158a1b71bc8f96f4a5388ed9f1d49997aab7d31c02537678: Status 404 returned error can't find the container with id ba10a8e43538fc54158a1b71bc8f96f4a5388ed9f1d49997aab7d31c02537678 Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.738157 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-l48zw"] Nov 25 23:21:47 crc kubenswrapper[5045]: I1125 23:21:47.747505 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-l48zw"] Nov 25 23:21:48 crc kubenswrapper[5045]: I1125 23:21:48.341329 5045 generic.go:334] "Generic (PLEG): container finished" podID="760e1230-bac9-431a-9ee5-ead3870b87e7" containerID="7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11" exitCode=0 Nov 25 23:21:48 crc kubenswrapper[5045]: I1125 23:21:48.341421 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" event={"ID":"760e1230-bac9-431a-9ee5-ead3870b87e7","Type":"ContainerDied","Data":"7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11"} Nov 25 23:21:48 crc kubenswrapper[5045]: I1125 23:21:48.341674 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" event={"ID":"760e1230-bac9-431a-9ee5-ead3870b87e7","Type":"ContainerStarted","Data":"ba10a8e43538fc54158a1b71bc8f96f4a5388ed9f1d49997aab7d31c02537678"} Nov 25 23:21:48 crc kubenswrapper[5045]: I1125 23:21:48.409444 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" path="/var/lib/kubelet/pods/4d447d31-1ecf-4836-b96f-ebcde66a1cbd/volumes" Nov 25 23:21:49 crc kubenswrapper[5045]: I1125 23:21:49.358901 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" event={"ID":"760e1230-bac9-431a-9ee5-ead3870b87e7","Type":"ContainerStarted","Data":"f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608"} Nov 25 23:21:49 crc kubenswrapper[5045]: I1125 23:21:49.359183 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:49 crc kubenswrapper[5045]: I1125 23:21:49.405559 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" podStartSLOduration=3.405537737 podStartE2EDuration="3.405537737s" podCreationTimestamp="2025-11-25 23:21:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:21:49.392319897 +0000 UTC m=+1365.749979009" watchObservedRunningTime="2025-11-25 23:21:49.405537737 
+0000 UTC m=+1365.763196869" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.707852 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rkz5f"] Nov 25 23:21:54 crc kubenswrapper[5045]: E1125 23:21:54.709334 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" containerName="dnsmasq-dns" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.709370 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" containerName="dnsmasq-dns" Nov 25 23:21:54 crc kubenswrapper[5045]: E1125 23:21:54.709426 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" containerName="init" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.709447 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" containerName="init" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.709923 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d447d31-1ecf-4836-b96f-ebcde66a1cbd" containerName="dnsmasq-dns" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.712959 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.732938 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rkz5f"] Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.854835 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-utilities\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.854902 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfvww\" (UniqueName: \"kubernetes.io/projected/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-kube-api-access-vfvww\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.855251 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-catalog-content\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.957574 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-catalog-content\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.957791 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-utilities\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc 
kubenswrapper[5045]: I1125 23:21:54.957864 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfvww\" (UniqueName: \"kubernetes.io/projected/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-kube-api-access-vfvww\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.958427 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-catalog-content\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.958451 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-utilities\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:54 crc kubenswrapper[5045]: I1125 23:21:54.985115 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfvww\" (UniqueName: \"kubernetes.io/projected/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-kube-api-access-vfvww\") pod \"redhat-operators-rkz5f\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") " pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:55 crc kubenswrapper[5045]: I1125 23:21:55.059184 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:21:55 crc kubenswrapper[5045]: I1125 23:21:55.606466 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rkz5f"] Nov 25 23:21:56 crc kubenswrapper[5045]: I1125 23:21:56.443428 5045 generic.go:334] "Generic (PLEG): container finished" podID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerID="9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3" exitCode=0 Nov 25 23:21:56 crc kubenswrapper[5045]: I1125 23:21:56.443491 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rkz5f" event={"ID":"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e","Type":"ContainerDied","Data":"9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3"} Nov 25 23:21:56 crc kubenswrapper[5045]: I1125 23:21:56.443549 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rkz5f" event={"ID":"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e","Type":"ContainerStarted","Data":"d39ee8fc72f93a3b1fcff3060ab5577dfedf8caf86f52b96b6f498a36272682f"} Nov 25 23:21:56 crc kubenswrapper[5045]: I1125 23:21:56.448365 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.035811 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.106907 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-zsljl"] Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.107275 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" podUID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" containerName="dnsmasq-dns" 
containerID="cri-o://7b9d8b23c6045da47c798bd3dc98cfc20221935f0c5f9be97f82de8298563bab" gracePeriod=10 Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.458244 5045 generic.go:334] "Generic (PLEG): container finished" podID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" containerID="7b9d8b23c6045da47c798bd3dc98cfc20221935f0c5f9be97f82de8298563bab" exitCode=0 Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.458326 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" event={"ID":"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b","Type":"ContainerDied","Data":"7b9d8b23c6045da47c798bd3dc98cfc20221935f0c5f9be97f82de8298563bab"} Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.753655 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.922063 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-dns-svc\") pod \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.922154 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-openstack-edpm-ipam\") pod \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.922273 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4q2r\" (UniqueName: \"kubernetes.io/projected/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-kube-api-access-t4q2r\") pod \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.922355 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-config\") pod \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.922380 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-nb\") pod \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.922406 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-sb\") pod \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\" (UID: \"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b\") " Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.927688 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-kube-api-access-t4q2r" (OuterVolumeSpecName: "kube-api-access-t4q2r") pod "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" (UID: "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b"). InnerVolumeSpecName "kube-api-access-t4q2r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.984777 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" (UID: "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.987427 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" (UID: "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.992259 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" (UID: "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:57 crc kubenswrapper[5045]: I1125 23:21:57.999020 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-config" (OuterVolumeSpecName: "config") pod "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" (UID: "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.020203 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" (UID: "b21f8b8b-fdc3-4de2-a7b8-c69053633a9b"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.024396 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.024439 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.024455 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4q2r\" (UniqueName: \"kubernetes.io/projected/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-kube-api-access-t4q2r\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.024468 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.024481 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.024492 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.466811 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rkz5f" event={"ID":"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e","Type":"ContainerStarted","Data":"1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78"} Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.470316 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" event={"ID":"b21f8b8b-fdc3-4de2-a7b8-c69053633a9b","Type":"ContainerDied","Data":"6af29b454ba229f2874e6b1a4310075cc497cb6ed0662582102da1f4a010a663"} Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.470424 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-zsljl" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.471217 5045 scope.go:117] "RemoveContainer" containerID="7b9d8b23c6045da47c798bd3dc98cfc20221935f0c5f9be97f82de8298563bab" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.547952 5045 scope.go:117] "RemoveContainer" containerID="eb2e46883eca681d8293295cb43ee70432bb070de1059dd5c39cdaf5a810c46d" Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.553063 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-zsljl"] Nov 25 23:21:58 crc kubenswrapper[5045]: I1125 23:21:58.561151 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-zsljl"] Nov 25 23:21:59 crc kubenswrapper[5045]: I1125 23:21:59.486774 5045 generic.go:334] "Generic (PLEG): container finished" podID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerID="1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78" exitCode=0 Nov 25 23:21:59 crc kubenswrapper[5045]: I1125 23:21:59.486811 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rkz5f" event={"ID":"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e","Type":"ContainerDied","Data":"1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78"} Nov 25 23:22:00 crc kubenswrapper[5045]: I1125 23:22:00.411673 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" path="/var/lib/kubelet/pods/b21f8b8b-fdc3-4de2-a7b8-c69053633a9b/volumes" Nov 25 23:22:00 crc kubenswrapper[5045]: I1125 23:22:00.540977 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:22:00 crc kubenswrapper[5045]: I1125 23:22:00.541062 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:22:00 crc kubenswrapper[5045]: I1125 23:22:00.541276 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:22:00 crc kubenswrapper[5045]: I1125 23:22:00.542292 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a38112312cf3b5e81d6e73cbd55baa1467e1d3453486aaf8dbc83baabfa5d34a"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:22:00 crc kubenswrapper[5045]: I1125 23:22:00.542395 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://a38112312cf3b5e81d6e73cbd55baa1467e1d3453486aaf8dbc83baabfa5d34a" gracePeriod=600 Nov 25 23:22:01 crc kubenswrapper[5045]: I1125 23:22:01.513499 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rkz5f" 
event={"ID":"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e","Type":"ContainerStarted","Data":"e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f"} Nov 25 23:22:01 crc kubenswrapper[5045]: I1125 23:22:01.517913 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="a38112312cf3b5e81d6e73cbd55baa1467e1d3453486aaf8dbc83baabfa5d34a" exitCode=0 Nov 25 23:22:01 crc kubenswrapper[5045]: I1125 23:22:01.517965 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"a38112312cf3b5e81d6e73cbd55baa1467e1d3453486aaf8dbc83baabfa5d34a"} Nov 25 23:22:01 crc kubenswrapper[5045]: I1125 23:22:01.518003 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708"} Nov 25 23:22:01 crc kubenswrapper[5045]: I1125 23:22:01.518021 5045 scope.go:117] "RemoveContainer" containerID="ad71cc49c7fac24fcefe379082dd68e60c6857b81de28d84310c7d3a35f4b46a" Nov 25 23:22:01 crc kubenswrapper[5045]: I1125 23:22:01.583124 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rkz5f" podStartSLOduration=3.608718067 podStartE2EDuration="7.583110079s" podCreationTimestamp="2025-11-25 23:21:54 +0000 UTC" firstStartedPulling="2025-11-25 23:21:56.447968618 +0000 UTC m=+1372.805627760" lastFinishedPulling="2025-11-25 23:22:00.42236065 +0000 UTC m=+1376.780019772" observedRunningTime="2025-11-25 23:22:01.53532502 +0000 UTC m=+1377.892984132" watchObservedRunningTime="2025-11-25 23:22:01.583110079 +0000 UTC m=+1377.940769191" Nov 25 23:22:05 crc kubenswrapper[5045]: I1125 23:22:05.060444 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:22:05 crc kubenswrapper[5045]: I1125 23:22:05.061015 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:22:06 crc kubenswrapper[5045]: I1125 23:22:06.136479 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rkz5f" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="registry-server" probeResult="failure" output=< Nov 25 23:22:06 crc kubenswrapper[5045]: timeout: failed to connect service ":50051" within 1s Nov 25 23:22:06 crc kubenswrapper[5045]: > Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.284771 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m"] Nov 25 23:22:07 crc kubenswrapper[5045]: E1125 23:22:07.285782 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" containerName="init" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.285805 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" containerName="init" Nov 25 23:22:07 crc kubenswrapper[5045]: E1125 23:22:07.285859 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" containerName="dnsmasq-dns" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.285872 5045 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" containerName="dnsmasq-dns" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.286249 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="b21f8b8b-fdc3-4de2-a7b8-c69053633a9b" containerName="dnsmasq-dns" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.287361 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.297153 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.298076 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.298138 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.298246 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.299378 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m"] Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.419194 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.419253 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.419289 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.419499 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcf5w\" (UniqueName: \"kubernetes.io/projected/12c286e1-8a5e-4427-aa87-819186e68dc4-kube-api-access-vcf5w\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.521622 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-repo-setup-combined-ca-bundle\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.521736 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.521791 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.521891 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcf5w\" (UniqueName: \"kubernetes.io/projected/12c286e1-8a5e-4427-aa87-819186e68dc4-kube-api-access-vcf5w\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.529469 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.531173 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.532413 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.556954 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcf5w\" (UniqueName: \"kubernetes.io/projected/12c286e1-8a5e-4427-aa87-819186e68dc4-kube-api-access-vcf5w\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.592685 5045 generic.go:334] "Generic (PLEG): container finished" podID="73cc82f8-4d6b-4608-9881-664a8194fc6f" containerID="ff5561252e4730118f39a085c527b64e1b4114217927d57eb044666bc57df9ba" exitCode=0 Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.592742 5045 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"73cc82f8-4d6b-4608-9881-664a8194fc6f","Type":"ContainerDied","Data":"ff5561252e4730118f39a085c527b64e1b4114217927d57eb044666bc57df9ba"} Nov 25 23:22:07 crc kubenswrapper[5045]: I1125 23:22:07.617436 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" Nov 25 23:22:08 crc kubenswrapper[5045]: I1125 23:22:08.223405 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m"] Nov 25 23:22:08 crc kubenswrapper[5045]: W1125 23:22:08.226757 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12c286e1_8a5e_4427_aa87_819186e68dc4.slice/crio-71479c62e242d4af2fb09f65e4e4bae4c3c46341ee5efa0a6993e1b6b090b7c4 WatchSource:0}: Error finding container 71479c62e242d4af2fb09f65e4e4bae4c3c46341ee5efa0a6993e1b6b090b7c4: Status 404 returned error can't find the container with id 71479c62e242d4af2fb09f65e4e4bae4c3c46341ee5efa0a6993e1b6b090b7c4 Nov 25 23:22:08 crc kubenswrapper[5045]: I1125 23:22:08.605866 5045 generic.go:334] "Generic (PLEG): container finished" podID="74ac97d4-b89e-47c2-b249-9e70da06d165" containerID="4beb97a9bc6b3966a1cfd3dd8a825b0a55818228d83a6ed74400277870945a7f" exitCode=0 Nov 25 23:22:08 crc kubenswrapper[5045]: I1125 23:22:08.606016 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"74ac97d4-b89e-47c2-b249-9e70da06d165","Type":"ContainerDied","Data":"4beb97a9bc6b3966a1cfd3dd8a825b0a55818228d83a6ed74400277870945a7f"} Nov 25 23:22:08 crc kubenswrapper[5045]: I1125 23:22:08.609238 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"73cc82f8-4d6b-4608-9881-664a8194fc6f","Type":"ContainerStarted","Data":"028b025f892f2c892fa3d788e26c043857a6bc8bab35416db72304a3a2b8f368"} Nov 25 23:22:08 crc kubenswrapper[5045]: I1125 23:22:08.609566 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 23:22:08 crc kubenswrapper[5045]: I1125 23:22:08.611793 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" event={"ID":"12c286e1-8a5e-4427-aa87-819186e68dc4","Type":"ContainerStarted","Data":"71479c62e242d4af2fb09f65e4e4bae4c3c46341ee5efa0a6993e1b6b090b7c4"} Nov 25 23:22:09 crc kubenswrapper[5045]: I1125 23:22:09.626625 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"74ac97d4-b89e-47c2-b249-9e70da06d165","Type":"ContainerStarted","Data":"53607dbdae61f613b5f530f4dedcc2c77a2e2a17f82e2b2d37a4e868f212d297"} Nov 25 23:22:09 crc kubenswrapper[5045]: I1125 23:22:09.628206 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 23:22:09 crc kubenswrapper[5045]: I1125 23:22:09.661272 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.661247452 podStartE2EDuration="37.661247452s" podCreationTimestamp="2025-11-25 23:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:22:08.668880446 +0000 UTC m=+1385.026539628" watchObservedRunningTime="2025-11-25 23:22:09.661247452 +0000 UTC 
m=+1386.018906564" Nov 25 23:22:14 crc kubenswrapper[5045]: I1125 23:22:14.435220 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=41.435196437 podStartE2EDuration="41.435196437s" podCreationTimestamp="2025-11-25 23:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:22:09.662159299 +0000 UTC m=+1386.019818411" watchObservedRunningTime="2025-11-25 23:22:14.435196437 +0000 UTC m=+1390.792855549" Nov 25 23:22:15 crc kubenswrapper[5045]: I1125 23:22:15.129865 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:22:15 crc kubenswrapper[5045]: I1125 23:22:15.207896 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:22:15 crc kubenswrapper[5045]: I1125 23:22:15.364433 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rkz5f"] Nov 25 23:22:16 crc kubenswrapper[5045]: I1125 23:22:16.692080 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rkz5f" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="registry-server" containerID="cri-o://e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f" gracePeriod=2 Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.672474 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.707695 5045 generic.go:334] "Generic (PLEG): container finished" podID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerID="e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f" exitCode=0 Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.708608 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rkz5f" event={"ID":"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e","Type":"ContainerDied","Data":"e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f"} Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.708851 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rkz5f" event={"ID":"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e","Type":"ContainerDied","Data":"d39ee8fc72f93a3b1fcff3060ab5577dfedf8caf86f52b96b6f498a36272682f"} Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.708991 5045 scope.go:117] "RemoveContainer" containerID="e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.709296 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rkz5f" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.732508 5045 scope.go:117] "RemoveContainer" containerID="1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.758474 5045 scope.go:117] "RemoveContainer" containerID="9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.784065 5045 scope.go:117] "RemoveContainer" containerID="e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f" Nov 25 23:22:17 crc kubenswrapper[5045]: E1125 23:22:17.784571 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f\": container with ID starting with e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f not found: ID does not exist" containerID="e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.784631 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f"} err="failed to get container status \"e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f\": rpc error: code = NotFound desc = could not find container \"e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f\": container with ID starting with e8db78486f7158476f62ef4e6173fdad1a239ef87aa0797997bd83182222911f not found: ID does not exist" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.784834 5045 scope.go:117] "RemoveContainer" containerID="1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78" Nov 25 23:22:17 crc kubenswrapper[5045]: E1125 23:22:17.785193 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78\": container with ID starting with 1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78 not found: ID does not exist" containerID="1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.785216 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78"} err="failed to get container status \"1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78\": rpc error: code = NotFound desc = could not find container \"1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78\": container with ID starting with 1d2e231b4849acaf310dc8bcb8a9fd3c9ef8373824b900308df7e649372f6d78 not found: ID does not exist" Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.785228 5045 scope.go:117] "RemoveContainer" containerID="9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3" Nov 25 23:22:17 crc kubenswrapper[5045]: E1125 23:22:17.785622 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3\": container with ID starting with 9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3 not found: ID does not exist" containerID="9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3" 
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.785658 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3"} err="failed to get container status \"9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3\": rpc error: code = NotFound desc = could not find container \"9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3\": container with ID starting with 9769b1520f53e32454a4128e073ae9732b38ff82e154cd19b9c780b4484ffba3 not found: ID does not exist"
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.843098 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-catalog-content\") pod \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") "
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.843239 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-utilities\") pod \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") "
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.843392 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfvww\" (UniqueName: \"kubernetes.io/projected/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-kube-api-access-vfvww\") pod \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\" (UID: \"d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e\") "
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.844098 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-utilities" (OuterVolumeSpecName: "utilities") pod "d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" (UID: "d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.849459 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-kube-api-access-vfvww" (OuterVolumeSpecName: "kube-api-access-vfvww") pod "d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" (UID: "d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e"). InnerVolumeSpecName "kube-api-access-vfvww". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.929073 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" (UID: "d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.945930 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfvww\" (UniqueName: \"kubernetes.io/projected/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-kube-api-access-vfvww\") on node \"crc\" DevicePath \"\""
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.946166 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 23:22:17 crc kubenswrapper[5045]: I1125 23:22:17.946265 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 23:22:18 crc kubenswrapper[5045]: I1125 23:22:18.121286 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rkz5f"]
Nov 25 23:22:18 crc kubenswrapper[5045]: I1125 23:22:18.159744 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rkz5f"]
Nov 25 23:22:18 crc kubenswrapper[5045]: I1125 23:22:18.407473 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" path="/var/lib/kubelet/pods/d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e/volumes"
Nov 25 23:22:18 crc kubenswrapper[5045]: I1125 23:22:18.723022 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" event={"ID":"12c286e1-8a5e-4427-aa87-819186e68dc4","Type":"ContainerStarted","Data":"e415768329531e33ff2d9fb072313a5b67d9290f31d1561fc0e00dde0a65e2e8"}
Nov 25 23:22:18 crc kubenswrapper[5045]: I1125 23:22:18.742501 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" podStartSLOduration=2.552832547 podStartE2EDuration="11.742479698s" podCreationTimestamp="2025-11-25 23:22:07 +0000 UTC" firstStartedPulling="2025-11-25 23:22:08.22962976 +0000 UTC m=+1384.587288872" lastFinishedPulling="2025-11-25 23:22:17.419276911 +0000 UTC m=+1393.776936023" observedRunningTime="2025-11-25 23:22:18.738983575 +0000 UTC m=+1395.096642727" watchObservedRunningTime="2025-11-25 23:22:18.742479698 +0000 UTC m=+1395.100138810"
Nov 25 23:22:22 crc kubenswrapper[5045]: I1125 23:22:22.513956 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 25 23:22:23 crc kubenswrapper[5045]: I1125 23:22:23.887056 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 23:22:29 crc kubenswrapper[5045]: I1125 23:22:29.841908 5045 generic.go:334] "Generic (PLEG): container finished" podID="12c286e1-8a5e-4427-aa87-819186e68dc4" containerID="e415768329531e33ff2d9fb072313a5b67d9290f31d1561fc0e00dde0a65e2e8" exitCode=0
Nov 25 23:22:29 crc kubenswrapper[5045]: I1125 23:22:29.842029 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" event={"ID":"12c286e1-8a5e-4427-aa87-819186e68dc4","Type":"ContainerDied","Data":"e415768329531e33ff2d9fb072313a5b67d9290f31d1561fc0e00dde0a65e2e8"}
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.304628 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m"
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.447180 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-repo-setup-combined-ca-bundle\") pod \"12c286e1-8a5e-4427-aa87-819186e68dc4\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") "
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.447303 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-inventory\") pod \"12c286e1-8a5e-4427-aa87-819186e68dc4\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") "
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.447382 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vcf5w\" (UniqueName: \"kubernetes.io/projected/12c286e1-8a5e-4427-aa87-819186e68dc4-kube-api-access-vcf5w\") pod \"12c286e1-8a5e-4427-aa87-819186e68dc4\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") "
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.447586 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-ssh-key\") pod \"12c286e1-8a5e-4427-aa87-819186e68dc4\" (UID: \"12c286e1-8a5e-4427-aa87-819186e68dc4\") "
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.453755 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "12c286e1-8a5e-4427-aa87-819186e68dc4" (UID: "12c286e1-8a5e-4427-aa87-819186e68dc4"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.453826 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12c286e1-8a5e-4427-aa87-819186e68dc4-kube-api-access-vcf5w" (OuterVolumeSpecName: "kube-api-access-vcf5w") pod "12c286e1-8a5e-4427-aa87-819186e68dc4" (UID: "12c286e1-8a5e-4427-aa87-819186e68dc4"). InnerVolumeSpecName "kube-api-access-vcf5w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.478528 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "12c286e1-8a5e-4427-aa87-819186e68dc4" (UID: "12c286e1-8a5e-4427-aa87-819186e68dc4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.495625 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-inventory" (OuterVolumeSpecName: "inventory") pod "12c286e1-8a5e-4427-aa87-819186e68dc4" (UID: "12c286e1-8a5e-4427-aa87-819186e68dc4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.549756 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.549791 5045 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.549803 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12c286e1-8a5e-4427-aa87-819186e68dc4-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.549814 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vcf5w\" (UniqueName: \"kubernetes.io/projected/12c286e1-8a5e-4427-aa87-819186e68dc4-kube-api-access-vcf5w\") on node \"crc\" DevicePath \"\""
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.867455 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m" event={"ID":"12c286e1-8a5e-4427-aa87-819186e68dc4","Type":"ContainerDied","Data":"71479c62e242d4af2fb09f65e4e4bae4c3c46341ee5efa0a6993e1b6b090b7c4"}
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.867514 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71479c62e242d4af2fb09f65e4e4bae4c3c46341ee5efa0a6993e1b6b090b7c4"
Nov 25 23:22:31 crc kubenswrapper[5045]: I1125 23:22:31.867615 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.012502 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"]
Nov 25 23:22:32 crc kubenswrapper[5045]: E1125 23:22:32.013040 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="extract-utilities"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.013060 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="extract-utilities"
Nov 25 23:22:32 crc kubenswrapper[5045]: E1125 23:22:32.013074 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="extract-content"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.013082 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="extract-content"
Nov 25 23:22:32 crc kubenswrapper[5045]: E1125 23:22:32.013108 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12c286e1-8a5e-4427-aa87-819186e68dc4" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.013119 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="12c286e1-8a5e-4427-aa87-819186e68dc4" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 23:22:32 crc kubenswrapper[5045]: E1125 23:22:32.013148 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="registry-server"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.013155 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="registry-server"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.013353 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3b83667-ecb5-4de3-9bb4-b6b3387a9b0e" containerName="registry-server"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.013382 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="12c286e1-8a5e-4427-aa87-819186e68dc4" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.014129 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.016058 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.016257 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.016499 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.016684 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.022641 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"]
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.195846 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmpbg\" (UniqueName: \"kubernetes.io/projected/42a18661-c8fa-41a6-8480-5547871c51d6-kube-api-access-zmpbg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.195930 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.196011 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.196262 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.298445 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmpbg\" (UniqueName: \"kubernetes.io/projected/42a18661-c8fa-41a6-8480-5547871c51d6-kube-api-access-zmpbg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.298486 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.298531 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.298572 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.305947 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.306040 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.308894 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.328853 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmpbg\" (UniqueName: \"kubernetes.io/projected/42a18661-c8fa-41a6-8480-5547871c51d6-kube-api-access-zmpbg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.344665 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.714422 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"]
Nov 25 23:22:32 crc kubenswrapper[5045]: I1125 23:22:32.879585 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd" event={"ID":"42a18661-c8fa-41a6-8480-5547871c51d6","Type":"ContainerStarted","Data":"a968c5b9efb2a4b26e7c2bf2aac23a2c566d946df05264c23a45f53ee7f2c1af"}
Nov 25 23:22:33 crc kubenswrapper[5045]: I1125 23:22:33.894440 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd" event={"ID":"42a18661-c8fa-41a6-8480-5547871c51d6","Type":"ContainerStarted","Data":"66fad4c6427ab782a5d08fad5a3fde65cec2ada061559dc47c263f8f442a0d90"}
Nov 25 23:22:33 crc kubenswrapper[5045]: I1125 23:22:33.918981 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd" podStartSLOduration=2.503739873 podStartE2EDuration="2.918961761s" podCreationTimestamp="2025-11-25 23:22:31 +0000 UTC" firstStartedPulling="2025-11-25 23:22:32.720364106 +0000 UTC m=+1409.078023238" lastFinishedPulling="2025-11-25 23:22:33.135585984 +0000 UTC m=+1409.493245126" observedRunningTime="2025-11-25 23:22:33.91483834 +0000 UTC m=+1410.272497462" watchObservedRunningTime="2025-11-25 23:22:33.918961761 +0000 UTC m=+1410.276620883"
Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.524182 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2x4vs"]
Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.526204 5045 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.537581 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2x4vs"] Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.676392 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-utilities\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.676746 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzwtc\" (UniqueName: \"kubernetes.io/projected/fcbd566a-af50-4780-8266-ee914dd2878e-kube-api-access-wzwtc\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.676773 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-catalog-content\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.716388 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wk2rz"] Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.718191 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.729242 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk2rz"] Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.778549 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzwtc\" (UniqueName: \"kubernetes.io/projected/fcbd566a-af50-4780-8266-ee914dd2878e-kube-api-access-wzwtc\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.778610 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-catalog-content\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.778691 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-utilities\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.779150 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-catalog-content\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.779275 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-utilities\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.813981 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzwtc\" (UniqueName: \"kubernetes.io/projected/fcbd566a-af50-4780-8266-ee914dd2878e-kube-api-access-wzwtc\") pod \"certified-operators-2x4vs\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.851306 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.885397 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf6qg\" (UniqueName: \"kubernetes.io/projected/7bcbef17-d3a5-4905-a3fe-892353a31a1a-kube-api-access-pf6qg\") pod \"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.885855 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-catalog-content\") pod \"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.893021 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-utilities\") pod \"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.994626 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf6qg\" (UniqueName: \"kubernetes.io/projected/7bcbef17-d3a5-4905-a3fe-892353a31a1a-kube-api-access-pf6qg\") pod \"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.994953 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-catalog-content\") pod \"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.995590 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-catalog-content\") pod \"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.995698 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-utilities\") pod \"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:54 crc kubenswrapper[5045]: I1125 23:22:54.996043 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-utilities\") pod \"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:55 crc kubenswrapper[5045]: I1125 23:22:55.018919 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf6qg\" (UniqueName: \"kubernetes.io/projected/7bcbef17-d3a5-4905-a3fe-892353a31a1a-kube-api-access-pf6qg\") pod 
\"redhat-marketplace-wk2rz\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:55 crc kubenswrapper[5045]: I1125 23:22:55.038924 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:22:55 crc kubenswrapper[5045]: I1125 23:22:55.368014 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2x4vs"] Nov 25 23:22:55 crc kubenswrapper[5045]: W1125 23:22:55.375390 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcbd566a_af50_4780_8266_ee914dd2878e.slice/crio-6f935c7ecf4c6de8562d48e96a0ef240dada69b202a7fec5e5bc702cc757f231 WatchSource:0}: Error finding container 6f935c7ecf4c6de8562d48e96a0ef240dada69b202a7fec5e5bc702cc757f231: Status 404 returned error can't find the container with id 6f935c7ecf4c6de8562d48e96a0ef240dada69b202a7fec5e5bc702cc757f231 Nov 25 23:22:55 crc kubenswrapper[5045]: I1125 23:22:55.537544 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk2rz"] Nov 25 23:22:55 crc kubenswrapper[5045]: W1125 23:22:55.539504 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7bcbef17_d3a5_4905_a3fe_892353a31a1a.slice/crio-0efb024b56f67e67d13cc8fdbd687a0c493a130f12f41f55d8c90b5edf68781c WatchSource:0}: Error finding container 0efb024b56f67e67d13cc8fdbd687a0c493a130f12f41f55d8c90b5edf68781c: Status 404 returned error can't find the container with id 0efb024b56f67e67d13cc8fdbd687a0c493a130f12f41f55d8c90b5edf68781c Nov 25 23:22:56 crc kubenswrapper[5045]: I1125 23:22:56.194816 5045 generic.go:334] "Generic (PLEG): container finished" podID="fcbd566a-af50-4780-8266-ee914dd2878e" containerID="dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb" exitCode=0 Nov 25 23:22:56 crc kubenswrapper[5045]: I1125 23:22:56.194904 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2x4vs" event={"ID":"fcbd566a-af50-4780-8266-ee914dd2878e","Type":"ContainerDied","Data":"dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb"} Nov 25 23:22:56 crc kubenswrapper[5045]: I1125 23:22:56.194935 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2x4vs" event={"ID":"fcbd566a-af50-4780-8266-ee914dd2878e","Type":"ContainerStarted","Data":"6f935c7ecf4c6de8562d48e96a0ef240dada69b202a7fec5e5bc702cc757f231"} Nov 25 23:22:56 crc kubenswrapper[5045]: I1125 23:22:56.199162 5045 generic.go:334] "Generic (PLEG): container finished" podID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerID="64751012028a080f1dfa1fc7d27eacc8afef771a3ec8b5a521f35157cef55474" exitCode=0 Nov 25 23:22:56 crc kubenswrapper[5045]: I1125 23:22:56.199198 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk2rz" event={"ID":"7bcbef17-d3a5-4905-a3fe-892353a31a1a","Type":"ContainerDied","Data":"64751012028a080f1dfa1fc7d27eacc8afef771a3ec8b5a521f35157cef55474"} Nov 25 23:22:56 crc kubenswrapper[5045]: I1125 23:22:56.199223 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk2rz" event={"ID":"7bcbef17-d3a5-4905-a3fe-892353a31a1a","Type":"ContainerStarted","Data":"0efb024b56f67e67d13cc8fdbd687a0c493a130f12f41f55d8c90b5edf68781c"} Nov 25 
23:22:57 crc kubenswrapper[5045]: I1125 23:22:57.216252 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk2rz" event={"ID":"7bcbef17-d3a5-4905-a3fe-892353a31a1a","Type":"ContainerStarted","Data":"91e07a189be80065a63b880572f76fded844b350c94a92a32a9a354f8d207f8c"} Nov 25 23:22:57 crc kubenswrapper[5045]: I1125 23:22:57.218386 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2x4vs" event={"ID":"fcbd566a-af50-4780-8266-ee914dd2878e","Type":"ContainerStarted","Data":"b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d"} Nov 25 23:22:58 crc kubenswrapper[5045]: I1125 23:22:58.232408 5045 generic.go:334] "Generic (PLEG): container finished" podID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerID="91e07a189be80065a63b880572f76fded844b350c94a92a32a9a354f8d207f8c" exitCode=0 Nov 25 23:22:58 crc kubenswrapper[5045]: I1125 23:22:58.232527 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk2rz" event={"ID":"7bcbef17-d3a5-4905-a3fe-892353a31a1a","Type":"ContainerDied","Data":"91e07a189be80065a63b880572f76fded844b350c94a92a32a9a354f8d207f8c"} Nov 25 23:22:58 crc kubenswrapper[5045]: I1125 23:22:58.240384 5045 generic.go:334] "Generic (PLEG): container finished" podID="fcbd566a-af50-4780-8266-ee914dd2878e" containerID="b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d" exitCode=0 Nov 25 23:22:58 crc kubenswrapper[5045]: I1125 23:22:58.240429 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2x4vs" event={"ID":"fcbd566a-af50-4780-8266-ee914dd2878e","Type":"ContainerDied","Data":"b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d"} Nov 25 23:22:59 crc kubenswrapper[5045]: I1125 23:22:59.253753 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2x4vs" event={"ID":"fcbd566a-af50-4780-8266-ee914dd2878e","Type":"ContainerStarted","Data":"bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93"} Nov 25 23:22:59 crc kubenswrapper[5045]: I1125 23:22:59.258014 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk2rz" event={"ID":"7bcbef17-d3a5-4905-a3fe-892353a31a1a","Type":"ContainerStarted","Data":"a034709e7708a17cb0b709909fb11ba00de72d6034ab2cd353ac396dad9e2f20"} Nov 25 23:22:59 crc kubenswrapper[5045]: I1125 23:22:59.276606 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2x4vs" podStartSLOduration=2.785795968 podStartE2EDuration="5.276583945s" podCreationTimestamp="2025-11-25 23:22:54 +0000 UTC" firstStartedPulling="2025-11-25 23:22:56.198704445 +0000 UTC m=+1432.556363607" lastFinishedPulling="2025-11-25 23:22:58.689492432 +0000 UTC m=+1435.047151584" observedRunningTime="2025-11-25 23:22:59.273202435 +0000 UTC m=+1435.630861587" watchObservedRunningTime="2025-11-25 23:22:59.276583945 +0000 UTC m=+1435.634243087" Nov 25 23:22:59 crc kubenswrapper[5045]: I1125 23:22:59.293364 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wk2rz" podStartSLOduration=2.85645614 podStartE2EDuration="5.293345809s" podCreationTimestamp="2025-11-25 23:22:54 +0000 UTC" firstStartedPulling="2025-11-25 23:22:56.202673162 +0000 UTC m=+1432.560332284" lastFinishedPulling="2025-11-25 23:22:58.639562831 +0000 UTC m=+1434.997221953" 
observedRunningTime="2025-11-25 23:22:59.290955428 +0000 UTC m=+1435.648614550" watchObservedRunningTime="2025-11-25 23:22:59.293345809 +0000 UTC m=+1435.651004931" Nov 25 23:23:04 crc kubenswrapper[5045]: I1125 23:23:04.852198 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:23:04 crc kubenswrapper[5045]: I1125 23:23:04.854879 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:23:04 crc kubenswrapper[5045]: I1125 23:23:04.951782 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:23:05 crc kubenswrapper[5045]: I1125 23:23:05.040479 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:23:05 crc kubenswrapper[5045]: I1125 23:23:05.040568 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:23:05 crc kubenswrapper[5045]: I1125 23:23:05.126545 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:23:05 crc kubenswrapper[5045]: I1125 23:23:05.430253 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:23:05 crc kubenswrapper[5045]: I1125 23:23:05.434073 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:23:06 crc kubenswrapper[5045]: I1125 23:23:06.802474 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2x4vs"] Nov 25 23:23:07 crc kubenswrapper[5045]: I1125 23:23:07.365952 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2x4vs" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" containerName="registry-server" containerID="cri-o://bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93" gracePeriod=2 Nov 25 23:23:07 crc kubenswrapper[5045]: I1125 23:23:07.792763 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk2rz"] Nov 25 23:23:07 crc kubenswrapper[5045]: I1125 23:23:07.793254 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wk2rz" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerName="registry-server" containerID="cri-o://a034709e7708a17cb0b709909fb11ba00de72d6034ab2cd353ac396dad9e2f20" gracePeriod=2 Nov 25 23:23:07 crc kubenswrapper[5045]: I1125 23:23:07.869765 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.077246 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-catalog-content\") pod \"fcbd566a-af50-4780-8266-ee914dd2878e\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.077640 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-utilities\") pod \"fcbd566a-af50-4780-8266-ee914dd2878e\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.077837 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzwtc\" (UniqueName: \"kubernetes.io/projected/fcbd566a-af50-4780-8266-ee914dd2878e-kube-api-access-wzwtc\") pod \"fcbd566a-af50-4780-8266-ee914dd2878e\" (UID: \"fcbd566a-af50-4780-8266-ee914dd2878e\") " Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.078671 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-utilities" (OuterVolumeSpecName: "utilities") pod "fcbd566a-af50-4780-8266-ee914dd2878e" (UID: "fcbd566a-af50-4780-8266-ee914dd2878e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.083602 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcbd566a-af50-4780-8266-ee914dd2878e-kube-api-access-wzwtc" (OuterVolumeSpecName: "kube-api-access-wzwtc") pod "fcbd566a-af50-4780-8266-ee914dd2878e" (UID: "fcbd566a-af50-4780-8266-ee914dd2878e"). InnerVolumeSpecName "kube-api-access-wzwtc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.179625 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.179675 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzwtc\" (UniqueName: \"kubernetes.io/projected/fcbd566a-af50-4780-8266-ee914dd2878e-kube-api-access-wzwtc\") on node \"crc\" DevicePath \"\"" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.381645 5045 generic.go:334] "Generic (PLEG): container finished" podID="fcbd566a-af50-4780-8266-ee914dd2878e" containerID="bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93" exitCode=0 Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.381746 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2x4vs" event={"ID":"fcbd566a-af50-4780-8266-ee914dd2878e","Type":"ContainerDied","Data":"bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93"} Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.381806 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2x4vs" event={"ID":"fcbd566a-af50-4780-8266-ee914dd2878e","Type":"ContainerDied","Data":"6f935c7ecf4c6de8562d48e96a0ef240dada69b202a7fec5e5bc702cc757f231"} Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.381839 5045 scope.go:117] "RemoveContainer" containerID="bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.382060 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2x4vs" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.414204 5045 scope.go:117] "RemoveContainer" containerID="b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.456544 5045 scope.go:117] "RemoveContainer" containerID="dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.485086 5045 scope.go:117] "RemoveContainer" containerID="bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93" Nov 25 23:23:08 crc kubenswrapper[5045]: E1125 23:23:08.485768 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93\": container with ID starting with bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93 not found: ID does not exist" containerID="bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.485967 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93"} err="failed to get container status \"bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93\": rpc error: code = NotFound desc = could not find container \"bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93\": container with ID starting with bd9cf89e86d2c934c609c0a59d702273fdcabdc1d0d0e35e50fb9a689c8a0f93 not found: ID does not exist" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.486019 5045 scope.go:117] "RemoveContainer" containerID="b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d" Nov 25 23:23:08 crc kubenswrapper[5045]: E1125 23:23:08.487077 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d\": container with ID starting with b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d not found: ID does not exist" containerID="b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.487140 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d"} err="failed to get container status \"b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d\": rpc error: code = NotFound desc = could not find container \"b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d\": container with ID starting with b8fc245be20d6a31934899100a4c4c06ab0856c5ffc3df9af81520c07479d20d not found: ID does not exist" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.487169 5045 scope.go:117] "RemoveContainer" containerID="dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb" Nov 25 23:23:08 crc kubenswrapper[5045]: E1125 23:23:08.487883 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb\": container with ID starting with dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb not found: ID does not exist" containerID="dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb" 
Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.487951 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb"} err="failed to get container status \"dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb\": rpc error: code = NotFound desc = could not find container \"dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb\": container with ID starting with dc780dd9271c6f449a4d4d3afc78f2c2c5c6f9f51eb57f460432e6b4a44a17fb not found: ID does not exist" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.614296 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fcbd566a-af50-4780-8266-ee914dd2878e" (UID: "fcbd566a-af50-4780-8266-ee914dd2878e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.689067 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcbd566a-af50-4780-8266-ee914dd2878e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.729560 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2x4vs"] Nov 25 23:23:08 crc kubenswrapper[5045]: I1125 23:23:08.745769 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2x4vs"] Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.415650 5045 generic.go:334] "Generic (PLEG): container finished" podID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerID="a034709e7708a17cb0b709909fb11ba00de72d6034ab2cd353ac396dad9e2f20" exitCode=0 Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.415704 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk2rz" event={"ID":"7bcbef17-d3a5-4905-a3fe-892353a31a1a","Type":"ContainerDied","Data":"a034709e7708a17cb0b709909fb11ba00de72d6034ab2cd353ac396dad9e2f20"} Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.542596 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.727433 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pf6qg\" (UniqueName: \"kubernetes.io/projected/7bcbef17-d3a5-4905-a3fe-892353a31a1a-kube-api-access-pf6qg\") pod \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.727557 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-utilities\") pod \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.727756 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-catalog-content\") pod \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\" (UID: \"7bcbef17-d3a5-4905-a3fe-892353a31a1a\") " Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.729916 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-utilities" (OuterVolumeSpecName: "utilities") pod "7bcbef17-d3a5-4905-a3fe-892353a31a1a" (UID: "7bcbef17-d3a5-4905-a3fe-892353a31a1a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.736249 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bcbef17-d3a5-4905-a3fe-892353a31a1a-kube-api-access-pf6qg" (OuterVolumeSpecName: "kube-api-access-pf6qg") pod "7bcbef17-d3a5-4905-a3fe-892353a31a1a" (UID: "7bcbef17-d3a5-4905-a3fe-892353a31a1a"). InnerVolumeSpecName "kube-api-access-pf6qg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.758542 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7bcbef17-d3a5-4905-a3fe-892353a31a1a" (UID: "7bcbef17-d3a5-4905-a3fe-892353a31a1a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.830306 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.830363 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pf6qg\" (UniqueName: \"kubernetes.io/projected/7bcbef17-d3a5-4905-a3fe-892353a31a1a-kube-api-access-pf6qg\") on node \"crc\" DevicePath \"\"" Nov 25 23:23:09 crc kubenswrapper[5045]: I1125 23:23:09.830387 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bcbef17-d3a5-4905-a3fe-892353a31a1a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:23:10 crc kubenswrapper[5045]: I1125 23:23:10.414888 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" path="/var/lib/kubelet/pods/fcbd566a-af50-4780-8266-ee914dd2878e/volumes" Nov 25 23:23:10 crc kubenswrapper[5045]: I1125 23:23:10.440952 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk2rz" event={"ID":"7bcbef17-d3a5-4905-a3fe-892353a31a1a","Type":"ContainerDied","Data":"0efb024b56f67e67d13cc8fdbd687a0c493a130f12f41f55d8c90b5edf68781c"} Nov 25 23:23:10 crc kubenswrapper[5045]: I1125 23:23:10.442085 5045 scope.go:117] "RemoveContainer" containerID="a034709e7708a17cb0b709909fb11ba00de72d6034ab2cd353ac396dad9e2f20" Nov 25 23:23:10 crc kubenswrapper[5045]: I1125 23:23:10.441110 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wk2rz" Nov 25 23:23:10 crc kubenswrapper[5045]: I1125 23:23:10.494636 5045 scope.go:117] "RemoveContainer" containerID="91e07a189be80065a63b880572f76fded844b350c94a92a32a9a354f8d207f8c" Nov 25 23:23:10 crc kubenswrapper[5045]: I1125 23:23:10.507038 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk2rz"] Nov 25 23:23:10 crc kubenswrapper[5045]: I1125 23:23:10.517277 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk2rz"] Nov 25 23:23:10 crc kubenswrapper[5045]: I1125 23:23:10.518045 5045 scope.go:117] "RemoveContainer" containerID="64751012028a080f1dfa1fc7d27eacc8afef771a3ec8b5a521f35157cef55474" Nov 25 23:23:12 crc kubenswrapper[5045]: I1125 23:23:12.416672 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" path="/var/lib/kubelet/pods/7bcbef17-d3a5-4905-a3fe-892353a31a1a/volumes" Nov 25 23:24:17 crc kubenswrapper[5045]: I1125 23:24:17.549903 5045 scope.go:117] "RemoveContainer" containerID="aebede3e1e21acb7317b76aa4914dcd7b9f7422f4ae8d79f3b9ef122da51bee6" Nov 25 23:24:17 crc kubenswrapper[5045]: I1125 23:24:17.608204 5045 scope.go:117] "RemoveContainer" containerID="8a11996119b6d30f900e89c829331367f50c87e5755083d713c6178e2570c206" Nov 25 23:24:17 crc kubenswrapper[5045]: I1125 23:24:17.677449 5045 scope.go:117] "RemoveContainer" containerID="c7cff6530f101a05ce21cecd01136de0bd933cc5cf7d052324eae5f192674055" Nov 25 23:24:30 crc kubenswrapper[5045]: I1125 23:24:30.540813 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:24:30 crc kubenswrapper[5045]: I1125 23:24:30.541628 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:25:00 crc kubenswrapper[5045]: I1125 23:25:00.540945 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:25:00 crc kubenswrapper[5045]: I1125 23:25:00.542038 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:25:17 crc kubenswrapper[5045]: I1125 23:25:17.837951 5045 scope.go:117] "RemoveContainer" containerID="59e90f7f643b3d6f5b5e02f51e68b103d5e8636f11ddb89f51644eddfce1d4ec" Nov 25 23:25:17 crc kubenswrapper[5045]: I1125 23:25:17.879563 5045 scope.go:117] "RemoveContainer" containerID="142088eeab9eb9a65ddbc000fc03e76c306a501e3634df6ec644771acecdaf13" Nov 25 23:25:30 crc kubenswrapper[5045]: I1125 23:25:30.541112 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:25:30 crc kubenswrapper[5045]: I1125 23:25:30.541821 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:25:30 crc kubenswrapper[5045]: I1125 23:25:30.541886 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:25:30 crc kubenswrapper[5045]: I1125 23:25:30.542673 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:25:30 crc kubenswrapper[5045]: I1125 23:25:30.542812 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" gracePeriod=600 Nov 25 23:25:30 crc kubenswrapper[5045]: E1125 23:25:30.674161 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:25:31 crc kubenswrapper[5045]: I1125 23:25:31.189151 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" exitCode=0 Nov 25 23:25:31 crc kubenswrapper[5045]: I1125 23:25:31.189212 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708"} Nov 25 23:25:31 crc kubenswrapper[5045]: I1125 23:25:31.189259 5045 scope.go:117] "RemoveContainer" containerID="a38112312cf3b5e81d6e73cbd55baa1467e1d3453486aaf8dbc83baabfa5d34a" Nov 25 23:25:31 crc kubenswrapper[5045]: I1125 23:25:31.190292 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:25:31 crc kubenswrapper[5045]: E1125 23:25:31.190811 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:25:42 crc kubenswrapper[5045]: I1125 23:25:42.396487 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:25:42 crc kubenswrapper[5045]: E1125 23:25:42.397514 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:25:54 crc kubenswrapper[5045]: I1125 23:25:54.424704 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:25:54 crc kubenswrapper[5045]: E1125 23:25:54.426352 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:25:59 crc kubenswrapper[5045]: I1125 23:25:59.553134 5045 generic.go:334] "Generic (PLEG): container finished" podID="42a18661-c8fa-41a6-8480-5547871c51d6" containerID="66fad4c6427ab782a5d08fad5a3fde65cec2ada061559dc47c263f8f442a0d90" exitCode=0 Nov 25 23:25:59 crc kubenswrapper[5045]: I1125 23:25:59.553235 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd" event={"ID":"42a18661-c8fa-41a6-8480-5547871c51d6","Type":"ContainerDied","Data":"66fad4c6427ab782a5d08fad5a3fde65cec2ada061559dc47c263f8f442a0d90"} Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.138866 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.220273 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-inventory\") pod \"42a18661-c8fa-41a6-8480-5547871c51d6\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.220650 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-ssh-key\") pod \"42a18661-c8fa-41a6-8480-5547871c51d6\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.220846 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmpbg\" (UniqueName: \"kubernetes.io/projected/42a18661-c8fa-41a6-8480-5547871c51d6-kube-api-access-zmpbg\") pod \"42a18661-c8fa-41a6-8480-5547871c51d6\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.220988 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-bootstrap-combined-ca-bundle\") pod \"42a18661-c8fa-41a6-8480-5547871c51d6\" (UID: \"42a18661-c8fa-41a6-8480-5547871c51d6\") " Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.226638 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42a18661-c8fa-41a6-8480-5547871c51d6-kube-api-access-zmpbg" (OuterVolumeSpecName: "kube-api-access-zmpbg") pod "42a18661-c8fa-41a6-8480-5547871c51d6" (UID: "42a18661-c8fa-41a6-8480-5547871c51d6"). InnerVolumeSpecName "kube-api-access-zmpbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.226763 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "42a18661-c8fa-41a6-8480-5547871c51d6" (UID: "42a18661-c8fa-41a6-8480-5547871c51d6"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.255706 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-inventory" (OuterVolumeSpecName: "inventory") pod "42a18661-c8fa-41a6-8480-5547871c51d6" (UID: "42a18661-c8fa-41a6-8480-5547871c51d6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.262660 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "42a18661-c8fa-41a6-8480-5547871c51d6" (UID: "42a18661-c8fa-41a6-8480-5547871c51d6"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.323468 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.323515 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmpbg\" (UniqueName: \"kubernetes.io/projected/42a18661-c8fa-41a6-8480-5547871c51d6-kube-api-access-zmpbg\") on node \"crc\" DevicePath \"\"" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.323538 5045 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.323557 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42a18661-c8fa-41a6-8480-5547871c51d6-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.589016 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd" event={"ID":"42a18661-c8fa-41a6-8480-5547871c51d6","Type":"ContainerDied","Data":"a968c5b9efb2a4b26e7c2bf2aac23a2c566d946df05264c23a45f53ee7f2c1af"} Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.589065 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a968c5b9efb2a4b26e7c2bf2aac23a2c566d946df05264c23a45f53ee7f2c1af" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.589139 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.683357 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8"] Nov 25 23:26:01 crc kubenswrapper[5045]: E1125 23:26:01.683744 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerName="extract-content" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.683761 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerName="extract-content" Nov 25 23:26:01 crc kubenswrapper[5045]: E1125 23:26:01.683777 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerName="extract-utilities" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.683784 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerName="extract-utilities" Nov 25 23:26:01 crc kubenswrapper[5045]: E1125 23:26:01.683794 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" containerName="extract-content" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.683800 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" containerName="extract-content" Nov 25 23:26:01 crc kubenswrapper[5045]: E1125 23:26:01.683814 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" containerName="extract-utilities" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.683819 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" containerName="extract-utilities" Nov 25 23:26:01 crc kubenswrapper[5045]: E1125 23:26:01.683831 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42a18661-c8fa-41a6-8480-5547871c51d6" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.683839 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="42a18661-c8fa-41a6-8480-5547871c51d6" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 23:26:01 crc kubenswrapper[5045]: E1125 23:26:01.683848 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" containerName="registry-server" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.683854 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" containerName="registry-server" Nov 25 23:26:01 crc kubenswrapper[5045]: E1125 23:26:01.683866 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerName="registry-server" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.683871 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" containerName="registry-server" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.684020 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="42a18661-c8fa-41a6-8480-5547871c51d6" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.684041 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bcbef17-d3a5-4905-a3fe-892353a31a1a" 
containerName="registry-server" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.684053 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcbd566a-af50-4780-8266-ee914dd2878e" containerName="registry-server" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.684597 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.688901 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.689176 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.689322 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.689431 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.707185 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8"] Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.832331 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.832382 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4r47\" (UniqueName: \"kubernetes.io/projected/9887c8c6-b00b-4415-b655-c127f62e3474-kube-api-access-q4r47\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.832459 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.934742 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.935149 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.935177 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4r47\" (UniqueName: \"kubernetes.io/projected/9887c8c6-b00b-4415-b655-c127f62e3474-kube-api-access-q4r47\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.940166 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.941027 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:01 crc kubenswrapper[5045]: I1125 23:26:01.949894 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4r47\" (UniqueName: \"kubernetes.io/projected/9887c8c6-b00b-4415-b655-c127f62e3474-kube-api-access-q4r47\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:02 crc kubenswrapper[5045]: I1125 23:26:02.006432 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:26:02 crc kubenswrapper[5045]: I1125 23:26:02.656690 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8"] Nov 25 23:26:03 crc kubenswrapper[5045]: I1125 23:26:03.609239 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" event={"ID":"9887c8c6-b00b-4415-b655-c127f62e3474","Type":"ContainerStarted","Data":"1c4141d685bfa1ed55039a7a3f1807cc078385db7c9bef89942c6f21ac928cfc"} Nov 25 23:26:03 crc kubenswrapper[5045]: I1125 23:26:03.609621 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" event={"ID":"9887c8c6-b00b-4415-b655-c127f62e3474","Type":"ContainerStarted","Data":"7cdc615ea891f9a290900831d43f6aa8f80664befd0c3a364bb2e72adced2163"} Nov 25 23:26:03 crc kubenswrapper[5045]: I1125 23:26:03.641490 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" podStartSLOduration=2.122958445 podStartE2EDuration="2.64146544s" podCreationTimestamp="2025-11-25 23:26:01 +0000 UTC" firstStartedPulling="2025-11-25 23:26:02.665533894 +0000 UTC m=+1619.023193046" lastFinishedPulling="2025-11-25 23:26:03.184040899 +0000 UTC m=+1619.541700041" observedRunningTime="2025-11-25 23:26:03.632075967 +0000 UTC m=+1619.989735119" watchObservedRunningTime="2025-11-25 23:26:03.64146544 +0000 UTC m=+1619.999124562" Nov 25 23:26:05 crc kubenswrapper[5045]: I1125 23:26:05.396812 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:26:05 crc kubenswrapper[5045]: E1125 23:26:05.397344 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:26:17 crc kubenswrapper[5045]: I1125 23:26:17.979787 5045 scope.go:117] "RemoveContainer" containerID="21a5b2c079233b35a3de8f72476ff5e347adb7408b76fc842cef38ee8142eef9" Nov 25 23:26:18 crc kubenswrapper[5045]: I1125 23:26:18.018185 5045 scope.go:117] "RemoveContainer" containerID="9370d113c6168fcea4b804b333e1a13ec2c470c3ff53d8ccd56ee648ed2e27da" Nov 25 23:26:18 crc kubenswrapper[5045]: I1125 23:26:18.049996 5045 scope.go:117] "RemoveContainer" containerID="4a3fd3c1df741d1243f675d495a6796d9edf95568fb448bc5db7b90cf70cce12" Nov 25 23:26:19 crc kubenswrapper[5045]: I1125 23:26:19.397568 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:26:19 crc kubenswrapper[5045]: E1125 23:26:19.398847 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:26:34 crc 
kubenswrapper[5045]: I1125 23:26:34.411891 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:26:34 crc kubenswrapper[5045]: E1125 23:26:34.412875 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:26:45 crc kubenswrapper[5045]: I1125 23:26:45.396493 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:26:45 crc kubenswrapper[5045]: E1125 23:26:45.397911 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:26:58 crc kubenswrapper[5045]: I1125 23:26:58.397228 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:26:58 crc kubenswrapper[5045]: E1125 23:26:58.398230 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:27:09 crc kubenswrapper[5045]: I1125 23:27:09.051465 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-d5x24"] Nov 25 23:27:09 crc kubenswrapper[5045]: I1125 23:27:09.060936 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-0776-account-create-update-v2jfl"] Nov 25 23:27:09 crc kubenswrapper[5045]: I1125 23:27:09.069903 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-d5x24"] Nov 25 23:27:09 crc kubenswrapper[5045]: I1125 23:27:09.077251 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-0776-account-create-update-v2jfl"] Nov 25 23:27:10 crc kubenswrapper[5045]: I1125 23:27:10.397479 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:27:10 crc kubenswrapper[5045]: E1125 23:27:10.397948 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:27:10 crc kubenswrapper[5045]: I1125 23:27:10.413163 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4df595bc-d45d-4560-9203-c57daba13a4b" 
path="/var/lib/kubelet/pods/4df595bc-d45d-4560-9203-c57daba13a4b/volumes" Nov 25 23:27:10 crc kubenswrapper[5045]: I1125 23:27:10.414284 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc65709d-5a60-4967-a2fa-21df985f4f82" path="/var/lib/kubelet/pods/bc65709d-5a60-4967-a2fa-21df985f4f82/volumes" Nov 25 23:27:11 crc kubenswrapper[5045]: I1125 23:27:11.064341 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-lf45m"] Nov 25 23:27:11 crc kubenswrapper[5045]: I1125 23:27:11.075626 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-lf45m"] Nov 25 23:27:11 crc kubenswrapper[5045]: I1125 23:27:11.083664 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-59af-account-create-update-v57d9"] Nov 25 23:27:11 crc kubenswrapper[5045]: I1125 23:27:11.090505 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-82cz2"] Nov 25 23:27:11 crc kubenswrapper[5045]: I1125 23:27:11.096586 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-82cz2"] Nov 25 23:27:11 crc kubenswrapper[5045]: I1125 23:27:11.102821 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-59af-account-create-update-v57d9"] Nov 25 23:27:12 crc kubenswrapper[5045]: I1125 23:27:12.047983 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-6b4a-account-create-update-rm6k5"] Nov 25 23:27:12 crc kubenswrapper[5045]: I1125 23:27:12.063663 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-6b4a-account-create-update-rm6k5"] Nov 25 23:27:12 crc kubenswrapper[5045]: I1125 23:27:12.407980 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24819069-fb36-4a4b-ae07-9f7d67276eb3" path="/var/lib/kubelet/pods/24819069-fb36-4a4b-ae07-9f7d67276eb3/volumes" Nov 25 23:27:12 crc kubenswrapper[5045]: I1125 23:27:12.408532 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65dd8927-af0c-4e6e-99d8-7990e24a959c" path="/var/lib/kubelet/pods/65dd8927-af0c-4e6e-99d8-7990e24a959c/volumes" Nov 25 23:27:12 crc kubenswrapper[5045]: I1125 23:27:12.409061 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ac58ff5-0d42-4ac9-b966-d628a21a5c91" path="/var/lib/kubelet/pods/8ac58ff5-0d42-4ac9-b966-d628a21a5c91/volumes" Nov 25 23:27:12 crc kubenswrapper[5045]: I1125 23:27:12.409583 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a37b2f94-c1a1-41d7-8f92-baade66a55fa" path="/var/lib/kubelet/pods/a37b2f94-c1a1-41d7-8f92-baade66a55fa/volumes" Nov 25 23:27:17 crc kubenswrapper[5045]: I1125 23:27:17.465941 5045 generic.go:334] "Generic (PLEG): container finished" podID="9887c8c6-b00b-4415-b655-c127f62e3474" containerID="1c4141d685bfa1ed55039a7a3f1807cc078385db7c9bef89942c6f21ac928cfc" exitCode=0 Nov 25 23:27:17 crc kubenswrapper[5045]: I1125 23:27:17.465992 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" event={"ID":"9887c8c6-b00b-4415-b655-c127f62e3474","Type":"ContainerDied","Data":"1c4141d685bfa1ed55039a7a3f1807cc078385db7c9bef89942c6f21ac928cfc"} Nov 25 23:27:18 crc kubenswrapper[5045]: I1125 23:27:18.141456 5045 scope.go:117] "RemoveContainer" containerID="07c9aa194f0874c5b2e76aef4634a9365f72d06293d866714a67e8b1cb52173b" Nov 25 23:27:18 crc kubenswrapper[5045]: I1125 23:27:18.175321 5045 scope.go:117] "RemoveContainer" 
containerID="baaee5ce924f8633f6b8d0b906fe203d5b4c7c31199dd5fc3dee4bc4364004e3" Nov 25 23:27:18 crc kubenswrapper[5045]: I1125 23:27:18.253152 5045 scope.go:117] "RemoveContainer" containerID="5e6d6260f96dc437b08a62ae214aef03f28fa362c61c2019c45d6581badbae76" Nov 25 23:27:18 crc kubenswrapper[5045]: I1125 23:27:18.314736 5045 scope.go:117] "RemoveContainer" containerID="936f7fea413558d85250c41e02ed1f30acfb2d9a6242694bdbfc29de296c93df" Nov 25 23:27:18 crc kubenswrapper[5045]: I1125 23:27:18.391475 5045 scope.go:117] "RemoveContainer" containerID="65859d31a91ea1e129e603efda822d5a8aebbe48cd4ca2b5f84ea209a9661f91" Nov 25 23:27:18 crc kubenswrapper[5045]: I1125 23:27:18.419702 5045 scope.go:117] "RemoveContainer" containerID="4ce7f8d7e27b2dd0cbae3334aaa921da50a2b3dcfeaa9afd11cc032dc1207385" Nov 25 23:27:18 crc kubenswrapper[5045]: I1125 23:27:18.928244 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.049664 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-inventory\") pod \"9887c8c6-b00b-4415-b655-c127f62e3474\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.049971 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4r47\" (UniqueName: \"kubernetes.io/projected/9887c8c6-b00b-4415-b655-c127f62e3474-kube-api-access-q4r47\") pod \"9887c8c6-b00b-4415-b655-c127f62e3474\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.050069 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-ssh-key\") pod \"9887c8c6-b00b-4415-b655-c127f62e3474\" (UID: \"9887c8c6-b00b-4415-b655-c127f62e3474\") " Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.055433 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9887c8c6-b00b-4415-b655-c127f62e3474-kube-api-access-q4r47" (OuterVolumeSpecName: "kube-api-access-q4r47") pod "9887c8c6-b00b-4415-b655-c127f62e3474" (UID: "9887c8c6-b00b-4415-b655-c127f62e3474"). InnerVolumeSpecName "kube-api-access-q4r47". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.075838 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9887c8c6-b00b-4415-b655-c127f62e3474" (UID: "9887c8c6-b00b-4415-b655-c127f62e3474"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.095848 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-inventory" (OuterVolumeSpecName: "inventory") pod "9887c8c6-b00b-4415-b655-c127f62e3474" (UID: "9887c8c6-b00b-4415-b655-c127f62e3474"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.152676 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.152752 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4r47\" (UniqueName: \"kubernetes.io/projected/9887c8c6-b00b-4415-b655-c127f62e3474-kube-api-access-q4r47\") on node \"crc\" DevicePath \"\"" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.152777 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9887c8c6-b00b-4415-b655-c127f62e3474-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.494310 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" event={"ID":"9887c8c6-b00b-4415-b655-c127f62e3474","Type":"ContainerDied","Data":"7cdc615ea891f9a290900831d43f6aa8f80664befd0c3a364bb2e72adced2163"} Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.495021 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cdc615ea891f9a290900831d43f6aa8f80664befd0c3a364bb2e72adced2163" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.494391 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.600664 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74"] Nov 25 23:27:19 crc kubenswrapper[5045]: E1125 23:27:19.601188 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9887c8c6-b00b-4415-b655-c127f62e3474" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.601223 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="9887c8c6-b00b-4415-b655-c127f62e3474" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.601524 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="9887c8c6-b00b-4415-b655-c127f62e3474" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.602399 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.605586 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.606073 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.606244 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.606548 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.629366 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74"] Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.763626 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.763703 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqwk6\" (UniqueName: \"kubernetes.io/projected/98cd627c-ae90-4321-8046-ca0356bb9b7c-kube-api-access-dqwk6\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.764900 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.867832 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.868047 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqwk6\" (UniqueName: \"kubernetes.io/projected/98cd627c-ae90-4321-8046-ca0356bb9b7c-kube-api-access-dqwk6\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.868251 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.872023 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.873229 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.888686 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqwk6\" (UniqueName: \"kubernetes.io/projected/98cd627c-ae90-4321-8046-ca0356bb9b7c-kube-api-access-dqwk6\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-nxg74\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:19 crc kubenswrapper[5045]: I1125 23:27:19.931756 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:20 crc kubenswrapper[5045]: I1125 23:27:20.543200 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74"] Nov 25 23:27:20 crc kubenswrapper[5045]: I1125 23:27:20.554562 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:27:21 crc kubenswrapper[5045]: I1125 23:27:21.516191 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" event={"ID":"98cd627c-ae90-4321-8046-ca0356bb9b7c","Type":"ContainerStarted","Data":"933b07dddfb1228622c0f32e2f206a5c7170738dd6610e9f9c8a01313116310a"} Nov 25 23:27:21 crc kubenswrapper[5045]: I1125 23:27:21.516980 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" event={"ID":"98cd627c-ae90-4321-8046-ca0356bb9b7c","Type":"ContainerStarted","Data":"af7bc224a32d37ea694790e03060e53af6469a041570878d3429bf9453a31b9c"} Nov 25 23:27:21 crc kubenswrapper[5045]: I1125 23:27:21.561135 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" podStartSLOduration=1.9550133189999999 podStartE2EDuration="2.561104771s" podCreationTimestamp="2025-11-25 23:27:19 +0000 UTC" firstStartedPulling="2025-11-25 23:27:20.554327869 +0000 UTC m=+1696.911986981" lastFinishedPulling="2025-11-25 23:27:21.160419321 +0000 UTC m=+1697.518078433" observedRunningTime="2025-11-25 23:27:21.54078606 +0000 UTC m=+1697.898445212" watchObservedRunningTime="2025-11-25 23:27:21.561104771 +0000 UTC m=+1697.918763923" Nov 25 23:27:22 crc kubenswrapper[5045]: I1125 23:27:22.397972 5045 scope.go:117] "RemoveContainer" 
containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:27:22 crc kubenswrapper[5045]: E1125 23:27:22.398503 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:27:27 crc kubenswrapper[5045]: I1125 23:27:27.577750 5045 generic.go:334] "Generic (PLEG): container finished" podID="98cd627c-ae90-4321-8046-ca0356bb9b7c" containerID="933b07dddfb1228622c0f32e2f206a5c7170738dd6610e9f9c8a01313116310a" exitCode=0 Nov 25 23:27:27 crc kubenswrapper[5045]: I1125 23:27:27.578146 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" event={"ID":"98cd627c-ae90-4321-8046-ca0356bb9b7c","Type":"ContainerDied","Data":"933b07dddfb1228622c0f32e2f206a5c7170738dd6610e9f9c8a01313116310a"} Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.055283 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.178070 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-inventory\") pod \"98cd627c-ae90-4321-8046-ca0356bb9b7c\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.178144 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-ssh-key\") pod \"98cd627c-ae90-4321-8046-ca0356bb9b7c\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.178360 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqwk6\" (UniqueName: \"kubernetes.io/projected/98cd627c-ae90-4321-8046-ca0356bb9b7c-kube-api-access-dqwk6\") pod \"98cd627c-ae90-4321-8046-ca0356bb9b7c\" (UID: \"98cd627c-ae90-4321-8046-ca0356bb9b7c\") " Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.184958 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98cd627c-ae90-4321-8046-ca0356bb9b7c-kube-api-access-dqwk6" (OuterVolumeSpecName: "kube-api-access-dqwk6") pod "98cd627c-ae90-4321-8046-ca0356bb9b7c" (UID: "98cd627c-ae90-4321-8046-ca0356bb9b7c"). InnerVolumeSpecName "kube-api-access-dqwk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.205183 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-inventory" (OuterVolumeSpecName: "inventory") pod "98cd627c-ae90-4321-8046-ca0356bb9b7c" (UID: "98cd627c-ae90-4321-8046-ca0356bb9b7c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.210622 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "98cd627c-ae90-4321-8046-ca0356bb9b7c" (UID: "98cd627c-ae90-4321-8046-ca0356bb9b7c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.280544 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqwk6\" (UniqueName: \"kubernetes.io/projected/98cd627c-ae90-4321-8046-ca0356bb9b7c-kube-api-access-dqwk6\") on node \"crc\" DevicePath \"\"" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.280584 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.280598 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cd627c-ae90-4321-8046-ca0356bb9b7c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.649663 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" event={"ID":"98cd627c-ae90-4321-8046-ca0356bb9b7c","Type":"ContainerDied","Data":"af7bc224a32d37ea694790e03060e53af6469a041570878d3429bf9453a31b9c"} Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.649737 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af7bc224a32d37ea694790e03060e53af6469a041570878d3429bf9453a31b9c" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.649846 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.741156 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd"] Nov 25 23:27:29 crc kubenswrapper[5045]: E1125 23:27:29.741490 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98cd627c-ae90-4321-8046-ca0356bb9b7c" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.741507 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="98cd627c-ae90-4321-8046-ca0356bb9b7c" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.741677 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="98cd627c-ae90-4321-8046-ca0356bb9b7c" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.749065 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.750863 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.754402 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd"] Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.754885 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.755057 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.770742 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.890984 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.891036 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.891112 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxgzb\" (UniqueName: \"kubernetes.io/projected/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-kube-api-access-sxgzb\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.992224 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.992495 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.992691 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxgzb\" (UniqueName: \"kubernetes.io/projected/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-kube-api-access-sxgzb\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: 
\"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.996022 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:29 crc kubenswrapper[5045]: I1125 23:27:29.996532 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:30 crc kubenswrapper[5045]: I1125 23:27:30.010345 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxgzb\" (UniqueName: \"kubernetes.io/projected/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-kube-api-access-sxgzb\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-h4tbd\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:30 crc kubenswrapper[5045]: I1125 23:27:30.068172 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:27:30 crc kubenswrapper[5045]: I1125 23:27:30.436991 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd"] Nov 25 23:27:30 crc kubenswrapper[5045]: W1125 23:27:30.442400 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3ed9cbb_dcd0_40c2_87b5_57087c5e5855.slice/crio-3e283bf7155105735e26e34dbed3ef483a96054c6807a36b44a6fb4b40149ebc WatchSource:0}: Error finding container 3e283bf7155105735e26e34dbed3ef483a96054c6807a36b44a6fb4b40149ebc: Status 404 returned error can't find the container with id 3e283bf7155105735e26e34dbed3ef483a96054c6807a36b44a6fb4b40149ebc Nov 25 23:27:30 crc kubenswrapper[5045]: I1125 23:27:30.660743 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" event={"ID":"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855","Type":"ContainerStarted","Data":"3e283bf7155105735e26e34dbed3ef483a96054c6807a36b44a6fb4b40149ebc"} Nov 25 23:27:31 crc kubenswrapper[5045]: I1125 23:27:31.671438 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" event={"ID":"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855","Type":"ContainerStarted","Data":"1dcbd0c8abc5213da0cb8ca807f387da17065211b920d538aa7dfd0fc4339ea3"} Nov 25 23:27:31 crc kubenswrapper[5045]: I1125 23:27:31.697319 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" podStartSLOduration=2.309032538 podStartE2EDuration="2.697288831s" podCreationTimestamp="2025-11-25 23:27:29 +0000 UTC" firstStartedPulling="2025-11-25 23:27:30.444192436 +0000 UTC m=+1706.801851548" lastFinishedPulling="2025-11-25 23:27:30.832448709 +0000 UTC m=+1707.190107841" observedRunningTime="2025-11-25 23:27:31.689523811 +0000 UTC 
m=+1708.047182963" watchObservedRunningTime="2025-11-25 23:27:31.697288831 +0000 UTC m=+1708.054947983" Nov 25 23:27:33 crc kubenswrapper[5045]: I1125 23:27:33.067830 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-t4zxw"] Nov 25 23:27:33 crc kubenswrapper[5045]: I1125 23:27:33.082129 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-t4zxw"] Nov 25 23:27:34 crc kubenswrapper[5045]: I1125 23:27:34.406783 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5db210ab-fa77-45b7-a0f8-d115ad2a5f73" path="/var/lib/kubelet/pods/5db210ab-fa77-45b7-a0f8-d115ad2a5f73/volumes" Nov 25 23:27:36 crc kubenswrapper[5045]: I1125 23:27:36.396674 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:27:36 crc kubenswrapper[5045]: E1125 23:27:36.397283 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:27:38 crc kubenswrapper[5045]: I1125 23:27:38.048892 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-9751-account-create-update-mt6kc"] Nov 25 23:27:38 crc kubenswrapper[5045]: I1125 23:27:38.064387 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-9751-account-create-update-mt6kc"] Nov 25 23:27:38 crc kubenswrapper[5045]: I1125 23:27:38.426133 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a371173-44b3-46c2-ad41-31b7387aca8a" path="/var/lib/kubelet/pods/2a371173-44b3-46c2-ad41-31b7387aca8a/volumes" Nov 25 23:27:39 crc kubenswrapper[5045]: I1125 23:27:39.055681 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-dbmm2"] Nov 25 23:27:39 crc kubenswrapper[5045]: I1125 23:27:39.069116 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-2hc8p"] Nov 25 23:27:39 crc kubenswrapper[5045]: I1125 23:27:39.079531 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-dbmm2"] Nov 25 23:27:39 crc kubenswrapper[5045]: I1125 23:27:39.087627 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-wlv7x"] Nov 25 23:27:39 crc kubenswrapper[5045]: I1125 23:27:39.095512 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-6d43-account-create-update-kmw94"] Nov 25 23:27:39 crc kubenswrapper[5045]: I1125 23:27:39.103785 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-2hc8p"] Nov 25 23:27:39 crc kubenswrapper[5045]: I1125 23:27:39.113295 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-wlv7x"] Nov 25 23:27:39 crc kubenswrapper[5045]: I1125 23:27:39.119260 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-6d43-account-create-update-kmw94"] Nov 25 23:27:40 crc kubenswrapper[5045]: I1125 23:27:40.419367 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a79af51-25bb-4e3f-a735-c67a4bc01360" path="/var/lib/kubelet/pods/1a79af51-25bb-4e3f-a735-c67a4bc01360/volumes" Nov 25 23:27:40 crc kubenswrapper[5045]: I1125 
23:27:40.420677 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="416caee6-d98c-4f85-a3a7-e23594648a25" path="/var/lib/kubelet/pods/416caee6-d98c-4f85-a3a7-e23594648a25/volumes" Nov 25 23:27:40 crc kubenswrapper[5045]: I1125 23:27:40.421975 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ce2c763-4353-4914-bc30-b4737ca1ffcf" path="/var/lib/kubelet/pods/4ce2c763-4353-4914-bc30-b4737ca1ffcf/volumes" Nov 25 23:27:40 crc kubenswrapper[5045]: I1125 23:27:40.423327 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c12eb36-5101-42cd-85f1-b9cc0ff4dc89" path="/var/lib/kubelet/pods/6c12eb36-5101-42cd-85f1-b9cc0ff4dc89/volumes" Nov 25 23:27:42 crc kubenswrapper[5045]: I1125 23:27:42.041134 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8c0a-account-create-update-65smx"] Nov 25 23:27:42 crc kubenswrapper[5045]: I1125 23:27:42.052935 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8c0a-account-create-update-65smx"] Nov 25 23:27:42 crc kubenswrapper[5045]: I1125 23:27:42.415302 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56361182-254c-43f6-893c-9f83d9942fe3" path="/var/lib/kubelet/pods/56361182-254c-43f6-893c-9f83d9942fe3/volumes" Nov 25 23:27:47 crc kubenswrapper[5045]: I1125 23:27:47.048196 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-6szwq"] Nov 25 23:27:47 crc kubenswrapper[5045]: I1125 23:27:47.059590 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-6szwq"] Nov 25 23:27:48 crc kubenswrapper[5045]: I1125 23:27:48.417144 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddb0f925-b7f9-4daa-ad41-64b73c18a6cc" path="/var/lib/kubelet/pods/ddb0f925-b7f9-4daa-ad41-64b73c18a6cc/volumes" Nov 25 23:27:49 crc kubenswrapper[5045]: I1125 23:27:49.397460 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:27:49 crc kubenswrapper[5045]: E1125 23:27:49.398287 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:28:04 crc kubenswrapper[5045]: I1125 23:28:04.404375 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:28:04 crc kubenswrapper[5045]: E1125 23:28:04.405310 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:28:13 crc kubenswrapper[5045]: I1125 23:28:13.170152 5045 generic.go:334] "Generic (PLEG): container finished" podID="d3ed9cbb-dcd0-40c2-87b5-57087c5e5855" containerID="1dcbd0c8abc5213da0cb8ca807f387da17065211b920d538aa7dfd0fc4339ea3" exitCode=0 Nov 25 23:28:13 crc kubenswrapper[5045]: I1125 23:28:13.170867 5045 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" event={"ID":"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855","Type":"ContainerDied","Data":"1dcbd0c8abc5213da0cb8ca807f387da17065211b920d538aa7dfd0fc4339ea3"} Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.066637 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-7fqmg"] Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.083016 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-7fqmg"] Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.436632 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9344bf81-aa30-41d5-8dd4-49a7f32a0cfb" path="/var/lib/kubelet/pods/9344bf81-aa30-41d5-8dd4-49a7f32a0cfb/volumes" Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.642283 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.794958 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-inventory\") pod \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.795075 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-ssh-key\") pod \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.795157 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxgzb\" (UniqueName: \"kubernetes.io/projected/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-kube-api-access-sxgzb\") pod \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\" (UID: \"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855\") " Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.802549 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-kube-api-access-sxgzb" (OuterVolumeSpecName: "kube-api-access-sxgzb") pod "d3ed9cbb-dcd0-40c2-87b5-57087c5e5855" (UID: "d3ed9cbb-dcd0-40c2-87b5-57087c5e5855"). InnerVolumeSpecName "kube-api-access-sxgzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.844062 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d3ed9cbb-dcd0-40c2-87b5-57087c5e5855" (UID: "d3ed9cbb-dcd0-40c2-87b5-57087c5e5855"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.844225 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-inventory" (OuterVolumeSpecName: "inventory") pod "d3ed9cbb-dcd0-40c2-87b5-57087c5e5855" (UID: "d3ed9cbb-dcd0-40c2-87b5-57087c5e5855"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.898137 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.898191 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:28:14 crc kubenswrapper[5045]: I1125 23:28:14.898212 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxgzb\" (UniqueName: \"kubernetes.io/projected/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855-kube-api-access-sxgzb\") on node \"crc\" DevicePath \"\"" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.194414 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" event={"ID":"d3ed9cbb-dcd0-40c2-87b5-57087c5e5855","Type":"ContainerDied","Data":"3e283bf7155105735e26e34dbed3ef483a96054c6807a36b44a6fb4b40149ebc"} Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.194457 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e283bf7155105735e26e34dbed3ef483a96054c6807a36b44a6fb4b40149ebc" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.194511 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.314038 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m"] Nov 25 23:28:15 crc kubenswrapper[5045]: E1125 23:28:15.314426 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3ed9cbb-dcd0-40c2-87b5-57087c5e5855" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.314447 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3ed9cbb-dcd0-40c2-87b5-57087c5e5855" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.314685 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3ed9cbb-dcd0-40c2-87b5-57087c5e5855" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.315510 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.318304 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.319319 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.319695 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.319752 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.333749 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m"] Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.510358 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxs7x\" (UniqueName: \"kubernetes.io/projected/53770c81-187b-44d6-b05a-887e6683232f-kube-api-access-vxs7x\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.510443 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.510545 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.612589 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxs7x\" (UniqueName: \"kubernetes.io/projected/53770c81-187b-44d6-b05a-887e6683232f-kube-api-access-vxs7x\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.612709 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.612858 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" 
(UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.619350 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.620497 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.630842 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxs7x\" (UniqueName: \"kubernetes.io/projected/53770c81-187b-44d6-b05a-887e6683232f-kube-api-access-vxs7x\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:15 crc kubenswrapper[5045]: I1125 23:28:15.653472 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:16 crc kubenswrapper[5045]: I1125 23:28:16.210880 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m"] Nov 25 23:28:16 crc kubenswrapper[5045]: I1125 23:28:16.397097 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:28:16 crc kubenswrapper[5045]: E1125 23:28:16.397387 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:28:17 crc kubenswrapper[5045]: I1125 23:28:17.223973 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" event={"ID":"53770c81-187b-44d6-b05a-887e6683232f","Type":"ContainerStarted","Data":"45da6e7843b8cdd6105419c9647b589bea790db89ac473bc4e744eef583678ea"} Nov 25 23:28:17 crc kubenswrapper[5045]: I1125 23:28:17.224874 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" event={"ID":"53770c81-187b-44d6-b05a-887e6683232f","Type":"ContainerStarted","Data":"a75532b406894f7b0e375beacc4b212fe4bacb7436454644ad372aa7ffee1c80"} Nov 25 23:28:17 crc kubenswrapper[5045]: I1125 23:28:17.255782 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" podStartSLOduration=1.7418594170000001 podStartE2EDuration="2.255753023s" podCreationTimestamp="2025-11-25 23:28:15 +0000 UTC" firstStartedPulling="2025-11-25 23:28:16.212189131 +0000 UTC 
m=+1752.569848243" lastFinishedPulling="2025-11-25 23:28:16.726082747 +0000 UTC m=+1753.083741849" observedRunningTime="2025-11-25 23:28:17.243948963 +0000 UTC m=+1753.601608115" watchObservedRunningTime="2025-11-25 23:28:17.255753023 +0000 UTC m=+1753.613412165" Nov 25 23:28:18 crc kubenswrapper[5045]: I1125 23:28:18.603645 5045 scope.go:117] "RemoveContainer" containerID="410aaad66169913c92b452e5a56fe7085e2bfc6b8ec920800a3e41f635fe543f" Nov 25 23:28:18 crc kubenswrapper[5045]: I1125 23:28:18.644890 5045 scope.go:117] "RemoveContainer" containerID="2e633e2246b417362f8104c5a5e2bc6b247de7d4432f5cd917063516ba7f409f" Nov 25 23:28:18 crc kubenswrapper[5045]: I1125 23:28:18.709527 5045 scope.go:117] "RemoveContainer" containerID="344cec385ed3c28abf262abbe66459da36fe46ecdec1e1a5d7e0a97f4607f020" Nov 25 23:28:18 crc kubenswrapper[5045]: I1125 23:28:18.768868 5045 scope.go:117] "RemoveContainer" containerID="5be049f291347f48c5690dcb0687d99696e28e794691ed94708e39dfd227cbf8" Nov 25 23:28:18 crc kubenswrapper[5045]: I1125 23:28:18.854048 5045 scope.go:117] "RemoveContainer" containerID="a736b048d86b6c834f984d731e342c161c26aaeb8ab6c7cfee488c12fbd6cf98" Nov 25 23:28:18 crc kubenswrapper[5045]: I1125 23:28:18.886654 5045 scope.go:117] "RemoveContainer" containerID="cf07e355747c9268cd065bfe7115f250d1e52bef25c3b22cadab9634532ed660" Nov 25 23:28:18 crc kubenswrapper[5045]: I1125 23:28:18.926066 5045 scope.go:117] "RemoveContainer" containerID="f0ae5b9351cc0125384d0dbe3a2d09b21cc4e2bc66b54532f3936f8881dae6df" Nov 25 23:28:18 crc kubenswrapper[5045]: I1125 23:28:18.950839 5045 scope.go:117] "RemoveContainer" containerID="314cf21683fc34621c621510ec7c149462274c3133fefe34402ab7acf029d3ae" Nov 25 23:28:19 crc kubenswrapper[5045]: I1125 23:28:19.000814 5045 scope.go:117] "RemoveContainer" containerID="15776c56598a7a3b12a1a16d02731d891849d73c9d23ea55f5cda4c9f25bb029" Nov 25 23:28:22 crc kubenswrapper[5045]: I1125 23:28:22.050619 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-gtpx4"] Nov 25 23:28:22 crc kubenswrapper[5045]: I1125 23:28:22.066562 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-lht5b"] Nov 25 23:28:22 crc kubenswrapper[5045]: I1125 23:28:22.085771 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-gtpx4"] Nov 25 23:28:22 crc kubenswrapper[5045]: I1125 23:28:22.092822 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-lht5b"] Nov 25 23:28:22 crc kubenswrapper[5045]: I1125 23:28:22.284547 5045 generic.go:334] "Generic (PLEG): container finished" podID="53770c81-187b-44d6-b05a-887e6683232f" containerID="45da6e7843b8cdd6105419c9647b589bea790db89ac473bc4e744eef583678ea" exitCode=0 Nov 25 23:28:22 crc kubenswrapper[5045]: I1125 23:28:22.284589 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" event={"ID":"53770c81-187b-44d6-b05a-887e6683232f","Type":"ContainerDied","Data":"45da6e7843b8cdd6105419c9647b589bea790db89ac473bc4e744eef583678ea"} Nov 25 23:28:22 crc kubenswrapper[5045]: I1125 23:28:22.416036 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abd9d902-3ba4-49d6-900e-9411bdd8b222" path="/var/lib/kubelet/pods/abd9d902-3ba4-49d6-900e-9411bdd8b222/volumes" Nov 25 23:28:22 crc kubenswrapper[5045]: I1125 23:28:22.417513 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89" 
path="/var/lib/kubelet/pods/fe936cb4-d5fc-44cd-b352-4b3d3f7c6a89/volumes" Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.809469 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.874957 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxs7x\" (UniqueName: \"kubernetes.io/projected/53770c81-187b-44d6-b05a-887e6683232f-kube-api-access-vxs7x\") pod \"53770c81-187b-44d6-b05a-887e6683232f\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.875100 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-ssh-key\") pod \"53770c81-187b-44d6-b05a-887e6683232f\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.875381 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-inventory\") pod \"53770c81-187b-44d6-b05a-887e6683232f\" (UID: \"53770c81-187b-44d6-b05a-887e6683232f\") " Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.885127 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53770c81-187b-44d6-b05a-887e6683232f-kube-api-access-vxs7x" (OuterVolumeSpecName: "kube-api-access-vxs7x") pod "53770c81-187b-44d6-b05a-887e6683232f" (UID: "53770c81-187b-44d6-b05a-887e6683232f"). InnerVolumeSpecName "kube-api-access-vxs7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.916471 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-inventory" (OuterVolumeSpecName: "inventory") pod "53770c81-187b-44d6-b05a-887e6683232f" (UID: "53770c81-187b-44d6-b05a-887e6683232f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.919356 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "53770c81-187b-44d6-b05a-887e6683232f" (UID: "53770c81-187b-44d6-b05a-887e6683232f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.978400 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.978441 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxs7x\" (UniqueName: \"kubernetes.io/projected/53770c81-187b-44d6-b05a-887e6683232f-kube-api-access-vxs7x\") on node \"crc\" DevicePath \"\"" Nov 25 23:28:23 crc kubenswrapper[5045]: I1125 23:28:23.978454 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53770c81-187b-44d6-b05a-887e6683232f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.307335 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" event={"ID":"53770c81-187b-44d6-b05a-887e6683232f","Type":"ContainerDied","Data":"a75532b406894f7b0e375beacc4b212fe4bacb7436454644ad372aa7ffee1c80"} Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.307762 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a75532b406894f7b0e375beacc4b212fe4bacb7436454644ad372aa7ffee1c80" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.307379 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.415277 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl"] Nov 25 23:28:24 crc kubenswrapper[5045]: E1125 23:28:24.415777 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53770c81-187b-44d6-b05a-887e6683232f" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.415807 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="53770c81-187b-44d6-b05a-887e6683232f" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.416142 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="53770c81-187b-44d6-b05a-887e6683232f" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.416880 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl"] Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.416952 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.423816 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.423908 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.424091 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.424805 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.594295 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.594457 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx74c\" (UniqueName: \"kubernetes.io/projected/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-kube-api-access-vx74c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.594573 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.696906 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.697256 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.697390 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx74c\" (UniqueName: \"kubernetes.io/projected/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-kube-api-access-vx74c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.705074 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.709053 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.728138 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx74c\" (UniqueName: \"kubernetes.io/projected/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-kube-api-access-vx74c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-bffvl\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:24 crc kubenswrapper[5045]: I1125 23:28:24.741071 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:28:25 crc kubenswrapper[5045]: I1125 23:28:25.149011 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl"] Nov 25 23:28:25 crc kubenswrapper[5045]: I1125 23:28:25.321916 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" event={"ID":"6bde7529-f01b-4aaa-9fe3-1aa5341fad62","Type":"ContainerStarted","Data":"6fd5acf1a44eed93f2035014af6b3529654b9450a4cdb5cc682976516b3b418f"} Nov 25 23:28:26 crc kubenswrapper[5045]: I1125 23:28:26.333385 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" event={"ID":"6bde7529-f01b-4aaa-9fe3-1aa5341fad62","Type":"ContainerStarted","Data":"befbfcea73604b99a29128a970a7df2aa233646d32e71e6fd99e2479d3b02558"} Nov 25 23:28:26 crc kubenswrapper[5045]: I1125 23:28:26.364997 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" podStartSLOduration=1.854739605 podStartE2EDuration="2.364965897s" podCreationTimestamp="2025-11-25 23:28:24 +0000 UTC" firstStartedPulling="2025-11-25 23:28:25.161668057 +0000 UTC m=+1761.519327179" lastFinishedPulling="2025-11-25 23:28:25.671894329 +0000 UTC m=+1762.029553471" observedRunningTime="2025-11-25 23:28:26.360698927 +0000 UTC m=+1762.718358069" watchObservedRunningTime="2025-11-25 23:28:26.364965897 +0000 UTC m=+1762.722625049" Nov 25 23:28:28 crc kubenswrapper[5045]: I1125 23:28:28.037153 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-n58vj"] Nov 25 23:28:28 crc kubenswrapper[5045]: I1125 23:28:28.050632 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-n58vj"] Nov 25 23:28:28 crc kubenswrapper[5045]: I1125 23:28:28.396573 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:28:28 crc kubenswrapper[5045]: E1125 23:28:28.396993 5045 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:28:28 crc kubenswrapper[5045]: I1125 23:28:28.416954 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="889492c0-db80-43f6-9a4f-36292139f3df" path="/var/lib/kubelet/pods/889492c0-db80-43f6-9a4f-36292139f3df/volumes" Nov 25 23:28:35 crc kubenswrapper[5045]: I1125 23:28:35.045932 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-lr4xs"] Nov 25 23:28:35 crc kubenswrapper[5045]: I1125 23:28:35.064353 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-lr4xs"] Nov 25 23:28:36 crc kubenswrapper[5045]: I1125 23:28:36.412862 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="483c4f92-701f-4dae-a00a-3a3d753d8c17" path="/var/lib/kubelet/pods/483c4f92-701f-4dae-a00a-3a3d753d8c17/volumes" Nov 25 23:28:40 crc kubenswrapper[5045]: I1125 23:28:40.397267 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:28:40 crc kubenswrapper[5045]: E1125 23:28:40.398047 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:28:54 crc kubenswrapper[5045]: I1125 23:28:54.416569 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:28:54 crc kubenswrapper[5045]: E1125 23:28:54.418115 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:29:08 crc kubenswrapper[5045]: I1125 23:29:08.398190 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:29:08 crc kubenswrapper[5045]: E1125 23:29:08.399466 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.078368 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-wzkt8"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.133889 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-wscvj"] Nov 25 23:29:17 
crc kubenswrapper[5045]: I1125 23:29:17.147572 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-wzkt8"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.155919 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-7p9hc"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.163775 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-03db-account-create-update-697t8"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.171023 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f20f-account-create-update-mn9dj"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.177584 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-wscvj"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.185004 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f20f-account-create-update-mn9dj"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.191809 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-7p9hc"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.197827 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-03db-account-create-update-697t8"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.203930 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-5376-account-create-update-r9q8j"] Nov 25 23:29:17 crc kubenswrapper[5045]: I1125 23:29:17.210055 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-5376-account-create-update-r9q8j"] Nov 25 23:29:18 crc kubenswrapper[5045]: I1125 23:29:18.417910 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f627043-0efd-4c7f-a7e6-16a2d5b12650" path="/var/lib/kubelet/pods/0f627043-0efd-4c7f-a7e6-16a2d5b12650/volumes" Nov 25 23:29:18 crc kubenswrapper[5045]: I1125 23:29:18.419835 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8502cc42-8be4-4cd2-b329-91f28be89ac8" path="/var/lib/kubelet/pods/8502cc42-8be4-4cd2-b329-91f28be89ac8/volumes" Nov 25 23:29:18 crc kubenswrapper[5045]: I1125 23:29:18.421131 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="983ae039-683a-49dc-a8e6-cfe305174a70" path="/var/lib/kubelet/pods/983ae039-683a-49dc-a8e6-cfe305174a70/volumes" Nov 25 23:29:18 crc kubenswrapper[5045]: I1125 23:29:18.422217 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bafa51d2-8844-41d5-ad62-5000cb9f18e5" path="/var/lib/kubelet/pods/bafa51d2-8844-41d5-ad62-5000cb9f18e5/volumes" Nov 25 23:29:18 crc kubenswrapper[5045]: I1125 23:29:18.424684 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce8b6926-27cc-4df4-86eb-53a9a17e548d" path="/var/lib/kubelet/pods/ce8b6926-27cc-4df4-86eb-53a9a17e548d/volumes" Nov 25 23:29:18 crc kubenswrapper[5045]: I1125 23:29:18.426122 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f" path="/var/lib/kubelet/pods/dc9f74e5-fe2e-4aee-924e-e73fd9e9ef2f/volumes" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.250258 5045 scope.go:117] "RemoveContainer" containerID="66115cabeaef3327c6ca5397c7dc11e8c6951dad1b51d63957e85eecca97dd3c" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.293325 5045 scope.go:117] "RemoveContainer" 
containerID="bb06757e8d16196b52789a3af42cc911e546ff88ed70ad4c9b5fea47627b4589" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.361816 5045 scope.go:117] "RemoveContainer" containerID="18309d55735010527c49a18dffbf05debe914d26c010cbd48eb9cfc556f26120" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.403622 5045 scope.go:117] "RemoveContainer" containerID="ab401352773cd5b2d5ca78e559ec38054346c749e815f0733ab5e4d3b18524a8" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.456203 5045 scope.go:117] "RemoveContainer" containerID="ad5850ba451f04b8f864d1f1bbdc82c5226df462911276b55f5a45a2baa92f95" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.480203 5045 scope.go:117] "RemoveContainer" containerID="edf42132a733929c3dbc71af34e742f6637a31313ecb5546c9fb80dec797b63c" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.510629 5045 scope.go:117] "RemoveContainer" containerID="ba879f8c25bf12fc747c8faa3ae7c732f6332e397a9ebbaba25e190b478b0788" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.552841 5045 scope.go:117] "RemoveContainer" containerID="e21614a740586314e5b6619f53a4219515e19f23d857587f4f4fc3df9b5284dc" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.583264 5045 scope.go:117] "RemoveContainer" containerID="24840b389f34f85a70f3bf69b11dac4081793ecf6272c219857be45fbd35d044" Nov 25 23:29:19 crc kubenswrapper[5045]: I1125 23:29:19.634898 5045 scope.go:117] "RemoveContainer" containerID="65fd06be77de57381a987b2759a4eab64c44932e5851903bda61639acc2dd21f" Nov 25 23:29:23 crc kubenswrapper[5045]: I1125 23:29:23.397308 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:29:23 crc kubenswrapper[5045]: E1125 23:29:23.398082 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:29:27 crc kubenswrapper[5045]: I1125 23:29:27.043448 5045 generic.go:334] "Generic (PLEG): container finished" podID="6bde7529-f01b-4aaa-9fe3-1aa5341fad62" containerID="befbfcea73604b99a29128a970a7df2aa233646d32e71e6fd99e2479d3b02558" exitCode=0 Nov 25 23:29:27 crc kubenswrapper[5045]: I1125 23:29:27.043542 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" event={"ID":"6bde7529-f01b-4aaa-9fe3-1aa5341fad62","Type":"ContainerDied","Data":"befbfcea73604b99a29128a970a7df2aa233646d32e71e6fd99e2479d3b02558"} Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.454312 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.574779 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-inventory\") pod \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.574953 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-ssh-key\") pod \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.575122 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx74c\" (UniqueName: \"kubernetes.io/projected/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-kube-api-access-vx74c\") pod \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\" (UID: \"6bde7529-f01b-4aaa-9fe3-1aa5341fad62\") " Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.590958 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-kube-api-access-vx74c" (OuterVolumeSpecName: "kube-api-access-vx74c") pod "6bde7529-f01b-4aaa-9fe3-1aa5341fad62" (UID: "6bde7529-f01b-4aaa-9fe3-1aa5341fad62"). InnerVolumeSpecName "kube-api-access-vx74c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.619191 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6bde7529-f01b-4aaa-9fe3-1aa5341fad62" (UID: "6bde7529-f01b-4aaa-9fe3-1aa5341fad62"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.626536 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-inventory" (OuterVolumeSpecName: "inventory") pod "6bde7529-f01b-4aaa-9fe3-1aa5341fad62" (UID: "6bde7529-f01b-4aaa-9fe3-1aa5341fad62"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.679076 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.679157 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:28 crc kubenswrapper[5045]: I1125 23:29:28.679177 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx74c\" (UniqueName: \"kubernetes.io/projected/6bde7529-f01b-4aaa-9fe3-1aa5341fad62-kube-api-access-vx74c\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.070756 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" event={"ID":"6bde7529-f01b-4aaa-9fe3-1aa5341fad62","Type":"ContainerDied","Data":"6fd5acf1a44eed93f2035014af6b3529654b9450a4cdb5cc682976516b3b418f"} Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.071267 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fd5acf1a44eed93f2035014af6b3529654b9450a4cdb5cc682976516b3b418f" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.070874 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.186328 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xqht7"] Nov 25 23:29:29 crc kubenswrapper[5045]: E1125 23:29:29.187343 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bde7529-f01b-4aaa-9fe3-1aa5341fad62" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.187381 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bde7529-f01b-4aaa-9fe3-1aa5341fad62" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.187689 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bde7529-f01b-4aaa-9fe3-1aa5341fad62" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.188661 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.196140 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.196743 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.196998 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.197252 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.204815 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xqht7"] Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.295182 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stfh5\" (UniqueName: \"kubernetes.io/projected/1abc767d-c2dd-4500-96ed-92f1dbf3d327-kube-api-access-stfh5\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.295563 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.295668 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.397553 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.397648 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.397836 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stfh5\" (UniqueName: \"kubernetes.io/projected/1abc767d-c2dd-4500-96ed-92f1dbf3d327-kube-api-access-stfh5\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc 
kubenswrapper[5045]: I1125 23:29:29.403516 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.406840 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.429130 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stfh5\" (UniqueName: \"kubernetes.io/projected/1abc767d-c2dd-4500-96ed-92f1dbf3d327-kube-api-access-stfh5\") pod \"ssh-known-hosts-edpm-deployment-xqht7\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.542012 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:29 crc kubenswrapper[5045]: I1125 23:29:29.973430 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xqht7"] Nov 25 23:29:30 crc kubenswrapper[5045]: I1125 23:29:30.089091 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" event={"ID":"1abc767d-c2dd-4500-96ed-92f1dbf3d327","Type":"ContainerStarted","Data":"8370ba43ffc8b05a718eae409458a481c6d02b4e0e4fbf56667a5fa53734a604"} Nov 25 23:29:31 crc kubenswrapper[5045]: I1125 23:29:31.103240 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" event={"ID":"1abc767d-c2dd-4500-96ed-92f1dbf3d327","Type":"ContainerStarted","Data":"c50547b81a752b7c84224f6649c74b625d780b057b97e1c8821e32a3d8a6af1a"} Nov 25 23:29:31 crc kubenswrapper[5045]: I1125 23:29:31.142864 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" podStartSLOduration=1.665014106 podStartE2EDuration="2.142838109s" podCreationTimestamp="2025-11-25 23:29:29 +0000 UTC" firstStartedPulling="2025-11-25 23:29:29.984440649 +0000 UTC m=+1826.342099771" lastFinishedPulling="2025-11-25 23:29:30.462264652 +0000 UTC m=+1826.819923774" observedRunningTime="2025-11-25 23:29:31.127192352 +0000 UTC m=+1827.484851504" watchObservedRunningTime="2025-11-25 23:29:31.142838109 +0000 UTC m=+1827.500497211" Nov 25 23:29:36 crc kubenswrapper[5045]: I1125 23:29:36.397175 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:29:36 crc kubenswrapper[5045]: E1125 23:29:36.398360 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:29:39 crc 
kubenswrapper[5045]: I1125 23:29:39.186890 5045 generic.go:334] "Generic (PLEG): container finished" podID="1abc767d-c2dd-4500-96ed-92f1dbf3d327" containerID="c50547b81a752b7c84224f6649c74b625d780b057b97e1c8821e32a3d8a6af1a" exitCode=0 Nov 25 23:29:39 crc kubenswrapper[5045]: I1125 23:29:39.187025 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" event={"ID":"1abc767d-c2dd-4500-96ed-92f1dbf3d327","Type":"ContainerDied","Data":"c50547b81a752b7c84224f6649c74b625d780b057b97e1c8821e32a3d8a6af1a"} Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.759103 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.878403 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stfh5\" (UniqueName: \"kubernetes.io/projected/1abc767d-c2dd-4500-96ed-92f1dbf3d327-kube-api-access-stfh5\") pod \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.878634 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-inventory-0\") pod \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.878796 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-ssh-key-openstack-edpm-ipam\") pod \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\" (UID: \"1abc767d-c2dd-4500-96ed-92f1dbf3d327\") " Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.885339 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1abc767d-c2dd-4500-96ed-92f1dbf3d327-kube-api-access-stfh5" (OuterVolumeSpecName: "kube-api-access-stfh5") pod "1abc767d-c2dd-4500-96ed-92f1dbf3d327" (UID: "1abc767d-c2dd-4500-96ed-92f1dbf3d327"). InnerVolumeSpecName "kube-api-access-stfh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.910125 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1abc767d-c2dd-4500-96ed-92f1dbf3d327" (UID: "1abc767d-c2dd-4500-96ed-92f1dbf3d327"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.911196 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "1abc767d-c2dd-4500-96ed-92f1dbf3d327" (UID: "1abc767d-c2dd-4500-96ed-92f1dbf3d327"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.981468 5045 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.981511 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1abc767d-c2dd-4500-96ed-92f1dbf3d327-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:40 crc kubenswrapper[5045]: I1125 23:29:40.981527 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stfh5\" (UniqueName: \"kubernetes.io/projected/1abc767d-c2dd-4500-96ed-92f1dbf3d327-kube-api-access-stfh5\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.214263 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" event={"ID":"1abc767d-c2dd-4500-96ed-92f1dbf3d327","Type":"ContainerDied","Data":"8370ba43ffc8b05a718eae409458a481c6d02b4e0e4fbf56667a5fa53734a604"} Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.214317 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8370ba43ffc8b05a718eae409458a481c6d02b4e0e4fbf56667a5fa53734a604" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.214363 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xqht7" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.312513 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p"] Nov 25 23:29:41 crc kubenswrapper[5045]: E1125 23:29:41.312950 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1abc767d-c2dd-4500-96ed-92f1dbf3d327" containerName="ssh-known-hosts-edpm-deployment" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.312972 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1abc767d-c2dd-4500-96ed-92f1dbf3d327" containerName="ssh-known-hosts-edpm-deployment" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.313209 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="1abc767d-c2dd-4500-96ed-92f1dbf3d327" containerName="ssh-known-hosts-edpm-deployment" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.315423 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.325428 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.325651 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.325429 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.326016 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.341840 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p"] Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.391081 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.391155 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.391386 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wn4r\" (UniqueName: \"kubernetes.io/projected/579827b1-ab24-4853-8733-d9ec6d9e81f0-kube-api-access-5wn4r\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.494478 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.494548 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.494864 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wn4r\" (UniqueName: \"kubernetes.io/projected/579827b1-ab24-4853-8733-d9ec6d9e81f0-kube-api-access-5wn4r\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.501960 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.509810 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.529888 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wn4r\" (UniqueName: \"kubernetes.io/projected/579827b1-ab24-4853-8733-d9ec6d9e81f0-kube-api-access-5wn4r\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-w5d4p\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:41 crc kubenswrapper[5045]: I1125 23:29:41.646501 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:42 crc kubenswrapper[5045]: I1125 23:29:42.066461 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p"] Nov 25 23:29:42 crc kubenswrapper[5045]: I1125 23:29:42.226786 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" event={"ID":"579827b1-ab24-4853-8733-d9ec6d9e81f0","Type":"ContainerStarted","Data":"66c7008c73abcaa0eb215299b4d95e24ec7b25622aca303b6f03da5a84697734"} Nov 25 23:29:43 crc kubenswrapper[5045]: I1125 23:29:43.238785 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" event={"ID":"579827b1-ab24-4853-8733-d9ec6d9e81f0","Type":"ContainerStarted","Data":"fa2ae70d3e092c430dc865e69c53b82de73489acf8306eb68fb2b8ae74d4c9e4"} Nov 25 23:29:43 crc kubenswrapper[5045]: I1125 23:29:43.264744 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" podStartSLOduration=1.867821972 podStartE2EDuration="2.264689582s" podCreationTimestamp="2025-11-25 23:29:41 +0000 UTC" firstStartedPulling="2025-11-25 23:29:42.070220441 +0000 UTC m=+1838.427879563" lastFinishedPulling="2025-11-25 23:29:42.467088021 +0000 UTC m=+1838.824747173" observedRunningTime="2025-11-25 23:29:43.254555142 +0000 UTC m=+1839.612214274" watchObservedRunningTime="2025-11-25 23:29:43.264689582 +0000 UTC m=+1839.622348704" Nov 25 23:29:45 crc kubenswrapper[5045]: I1125 23:29:45.083524 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gcz6n"] Nov 25 23:29:45 crc kubenswrapper[5045]: I1125 23:29:45.097151 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gcz6n"] Nov 25 23:29:46 crc kubenswrapper[5045]: I1125 23:29:46.418183 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1076c0c8-e031-42c0-9978-5ca0d1cbd401" 
path="/var/lib/kubelet/pods/1076c0c8-e031-42c0-9978-5ca0d1cbd401/volumes" Nov 25 23:29:50 crc kubenswrapper[5045]: I1125 23:29:50.397142 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:29:50 crc kubenswrapper[5045]: E1125 23:29:50.399356 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:29:52 crc kubenswrapper[5045]: I1125 23:29:52.346241 5045 generic.go:334] "Generic (PLEG): container finished" podID="579827b1-ab24-4853-8733-d9ec6d9e81f0" containerID="fa2ae70d3e092c430dc865e69c53b82de73489acf8306eb68fb2b8ae74d4c9e4" exitCode=0 Nov 25 23:29:52 crc kubenswrapper[5045]: I1125 23:29:52.346336 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" event={"ID":"579827b1-ab24-4853-8733-d9ec6d9e81f0","Type":"ContainerDied","Data":"fa2ae70d3e092c430dc865e69c53b82de73489acf8306eb68fb2b8ae74d4c9e4"} Nov 25 23:29:53 crc kubenswrapper[5045]: I1125 23:29:53.892638 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:53 crc kubenswrapper[5045]: I1125 23:29:53.960803 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-inventory\") pod \"579827b1-ab24-4853-8733-d9ec6d9e81f0\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " Nov 25 23:29:53 crc kubenswrapper[5045]: I1125 23:29:53.960937 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wn4r\" (UniqueName: \"kubernetes.io/projected/579827b1-ab24-4853-8733-d9ec6d9e81f0-kube-api-access-5wn4r\") pod \"579827b1-ab24-4853-8733-d9ec6d9e81f0\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " Nov 25 23:29:53 crc kubenswrapper[5045]: I1125 23:29:53.960994 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-ssh-key\") pod \"579827b1-ab24-4853-8733-d9ec6d9e81f0\" (UID: \"579827b1-ab24-4853-8733-d9ec6d9e81f0\") " Nov 25 23:29:53 crc kubenswrapper[5045]: I1125 23:29:53.970335 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/579827b1-ab24-4853-8733-d9ec6d9e81f0-kube-api-access-5wn4r" (OuterVolumeSpecName: "kube-api-access-5wn4r") pod "579827b1-ab24-4853-8733-d9ec6d9e81f0" (UID: "579827b1-ab24-4853-8733-d9ec6d9e81f0"). InnerVolumeSpecName "kube-api-access-5wn4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.007612 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-inventory" (OuterVolumeSpecName: "inventory") pod "579827b1-ab24-4853-8733-d9ec6d9e81f0" (UID: "579827b1-ab24-4853-8733-d9ec6d9e81f0"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.009356 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "579827b1-ab24-4853-8733-d9ec6d9e81f0" (UID: "579827b1-ab24-4853-8733-d9ec6d9e81f0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.063296 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.063325 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wn4r\" (UniqueName: \"kubernetes.io/projected/579827b1-ab24-4853-8733-d9ec6d9e81f0-kube-api-access-5wn4r\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.063336 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/579827b1-ab24-4853-8733-d9ec6d9e81f0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.374859 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" event={"ID":"579827b1-ab24-4853-8733-d9ec6d9e81f0","Type":"ContainerDied","Data":"66c7008c73abcaa0eb215299b4d95e24ec7b25622aca303b6f03da5a84697734"} Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.374915 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66c7008c73abcaa0eb215299b4d95e24ec7b25622aca303b6f03da5a84697734" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.374947 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.460299 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc"] Nov 25 23:29:54 crc kubenswrapper[5045]: E1125 23:29:54.462282 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="579827b1-ab24-4853-8733-d9ec6d9e81f0" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.462314 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="579827b1-ab24-4853-8733-d9ec6d9e81f0" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.462609 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="579827b1-ab24-4853-8733-d9ec6d9e81f0" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.463627 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.467001 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.467027 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.467134 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.469198 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.478273 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc"] Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.573043 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlhwg\" (UniqueName: \"kubernetes.io/projected/efb0b7f5-854d-495c-b19a-1fb1bcd60018-kube-api-access-wlhwg\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.573166 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.573408 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.675465 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlhwg\" (UniqueName: \"kubernetes.io/projected/efb0b7f5-854d-495c-b19a-1fb1bcd60018-kube-api-access-wlhwg\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.675688 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.675921 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: 
\"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.682610 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.683301 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.705882 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlhwg\" (UniqueName: \"kubernetes.io/projected/efb0b7f5-854d-495c-b19a-1fb1bcd60018-kube-api-access-wlhwg\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:54 crc kubenswrapper[5045]: I1125 23:29:54.796763 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:29:55 crc kubenswrapper[5045]: I1125 23:29:55.367639 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc"] Nov 25 23:29:55 crc kubenswrapper[5045]: I1125 23:29:55.382546 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" event={"ID":"efb0b7f5-854d-495c-b19a-1fb1bcd60018","Type":"ContainerStarted","Data":"d09f612b2d55404921ccfeb8a074d0dd65e48cde7c329ae9304c421381b1441a"} Nov 25 23:29:56 crc kubenswrapper[5045]: I1125 23:29:56.423945 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" event={"ID":"efb0b7f5-854d-495c-b19a-1fb1bcd60018","Type":"ContainerStarted","Data":"bc530f20d14c755c27c894f1d9eeb6f554e24bdf576583bef6e876b8c45e89b8"} Nov 25 23:29:56 crc kubenswrapper[5045]: I1125 23:29:56.447041 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" podStartSLOduration=1.9879068979999999 podStartE2EDuration="2.447018917s" podCreationTimestamp="2025-11-25 23:29:54 +0000 UTC" firstStartedPulling="2025-11-25 23:29:55.367212242 +0000 UTC m=+1851.724871354" lastFinishedPulling="2025-11-25 23:29:55.826324251 +0000 UTC m=+1852.183983373" observedRunningTime="2025-11-25 23:29:56.433000916 +0000 UTC m=+1852.790660058" watchObservedRunningTime="2025-11-25 23:29:56.447018917 +0000 UTC m=+1852.804678039" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.186676 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd"] Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.189237 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.191708 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.193493 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.197639 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd"] Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.316773 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-secret-volume\") pod \"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.316847 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-config-volume\") pod \"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.316882 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcrfs\" (UniqueName: \"kubernetes.io/projected/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-kube-api-access-bcrfs\") pod \"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.418333 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-secret-volume\") pod \"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.418370 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-config-volume\") pod \"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.418392 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcrfs\" (UniqueName: \"kubernetes.io/projected/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-kube-api-access-bcrfs\") pod \"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.420191 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-config-volume\") pod 
\"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.424965 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-secret-volume\") pod \"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.439376 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcrfs\" (UniqueName: \"kubernetes.io/projected/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-kube-api-access-bcrfs\") pod \"collect-profiles-29401890-bsptd\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:00 crc kubenswrapper[5045]: I1125 23:30:00.538016 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:01 crc kubenswrapper[5045]: I1125 23:30:01.045094 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-4psjq"] Nov 25 23:30:01 crc kubenswrapper[5045]: I1125 23:30:01.056904 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-4psjq"] Nov 25 23:30:01 crc kubenswrapper[5045]: I1125 23:30:01.619534 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd"] Nov 25 23:30:02 crc kubenswrapper[5045]: I1125 23:30:02.027986 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vd62p"] Nov 25 23:30:02 crc kubenswrapper[5045]: I1125 23:30:02.039999 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vd62p"] Nov 25 23:30:02 crc kubenswrapper[5045]: I1125 23:30:02.418993 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3327fc22-a987-47ad-a327-e5a7a2306f6e" path="/var/lib/kubelet/pods/3327fc22-a987-47ad-a327-e5a7a2306f6e/volumes" Nov 25 23:30:02 crc kubenswrapper[5045]: I1125 23:30:02.420639 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6db602b1-d6b8-410c-bd2f-fad64474ac8f" path="/var/lib/kubelet/pods/6db602b1-d6b8-410c-bd2f-fad64474ac8f/volumes" Nov 25 23:30:02 crc kubenswrapper[5045]: I1125 23:30:02.471175 5045 generic.go:334] "Generic (PLEG): container finished" podID="a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc" containerID="65843bade3fbaae09606c09fb4c538518d062b2177c6e9a847204522e5be4601" exitCode=0 Nov 25 23:30:02 crc kubenswrapper[5045]: I1125 23:30:02.471235 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" event={"ID":"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc","Type":"ContainerDied","Data":"65843bade3fbaae09606c09fb4c538518d062b2177c6e9a847204522e5be4601"} Nov 25 23:30:02 crc kubenswrapper[5045]: I1125 23:30:02.471275 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" event={"ID":"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc","Type":"ContainerStarted","Data":"9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c"} Nov 25 23:30:03 crc 
kubenswrapper[5045]: I1125 23:30:03.396612 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:30:03 crc kubenswrapper[5045]: E1125 23:30:03.397070 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:30:03 crc kubenswrapper[5045]: I1125 23:30:03.953199 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.097147 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-secret-volume\") pod \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.097236 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcrfs\" (UniqueName: \"kubernetes.io/projected/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-kube-api-access-bcrfs\") pod \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.097283 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-config-volume\") pod \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\" (UID: \"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc\") " Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.097984 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-config-volume" (OuterVolumeSpecName: "config-volume") pod "a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc" (UID: "a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.102611 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-kube-api-access-bcrfs" (OuterVolumeSpecName: "kube-api-access-bcrfs") pod "a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc" (UID: "a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc"). InnerVolumeSpecName "kube-api-access-bcrfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.103848 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc" (UID: "a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.199570 5045 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.199897 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcrfs\" (UniqueName: \"kubernetes.io/projected/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-kube-api-access-bcrfs\") on node \"crc\" DevicePath \"\"" Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.199918 5045 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.511472 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" event={"ID":"a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc","Type":"ContainerDied","Data":"9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c"} Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.511546 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd" Nov 25 23:30:04 crc kubenswrapper[5045]: I1125 23:30:04.511550 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c" Nov 25 23:30:04 crc kubenswrapper[5045]: E1125 23:30:04.715847 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice/crio-9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:30:06 crc kubenswrapper[5045]: I1125 23:30:06.535120 5045 generic.go:334] "Generic (PLEG): container finished" podID="efb0b7f5-854d-495c-b19a-1fb1bcd60018" containerID="bc530f20d14c755c27c894f1d9eeb6f554e24bdf576583bef6e876b8c45e89b8" exitCode=0 Nov 25 23:30:06 crc kubenswrapper[5045]: I1125 23:30:06.535274 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" event={"ID":"efb0b7f5-854d-495c-b19a-1fb1bcd60018","Type":"ContainerDied","Data":"bc530f20d14c755c27c894f1d9eeb6f554e24bdf576583bef6e876b8c45e89b8"} Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.019701 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.195934 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-inventory\") pod \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.196354 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlhwg\" (UniqueName: \"kubernetes.io/projected/efb0b7f5-854d-495c-b19a-1fb1bcd60018-kube-api-access-wlhwg\") pod \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.196586 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-ssh-key\") pod \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\" (UID: \"efb0b7f5-854d-495c-b19a-1fb1bcd60018\") " Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.201079 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efb0b7f5-854d-495c-b19a-1fb1bcd60018-kube-api-access-wlhwg" (OuterVolumeSpecName: "kube-api-access-wlhwg") pod "efb0b7f5-854d-495c-b19a-1fb1bcd60018" (UID: "efb0b7f5-854d-495c-b19a-1fb1bcd60018"). InnerVolumeSpecName "kube-api-access-wlhwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.235485 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "efb0b7f5-854d-495c-b19a-1fb1bcd60018" (UID: "efb0b7f5-854d-495c-b19a-1fb1bcd60018"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.241926 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-inventory" (OuterVolumeSpecName: "inventory") pod "efb0b7f5-854d-495c-b19a-1fb1bcd60018" (UID: "efb0b7f5-854d-495c-b19a-1fb1bcd60018"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.299961 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.300275 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlhwg\" (UniqueName: \"kubernetes.io/projected/efb0b7f5-854d-495c-b19a-1fb1bcd60018-kube-api-access-wlhwg\") on node \"crc\" DevicePath \"\"" Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.300403 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/efb0b7f5-854d-495c-b19a-1fb1bcd60018-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.562055 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" event={"ID":"efb0b7f5-854d-495c-b19a-1fb1bcd60018","Type":"ContainerDied","Data":"d09f612b2d55404921ccfeb8a074d0dd65e48cde7c329ae9304c421381b1441a"} Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.562130 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d09f612b2d55404921ccfeb8a074d0dd65e48cde7c329ae9304c421381b1441a" Nov 25 23:30:08 crc kubenswrapper[5045]: I1125 23:30:08.562140 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc" Nov 25 23:30:14 crc kubenswrapper[5045]: E1125 23:30:14.966248 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice/crio-9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:30:18 crc kubenswrapper[5045]: I1125 23:30:18.397333 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:30:18 crc kubenswrapper[5045]: E1125 23:30:18.398188 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:30:19 crc kubenswrapper[5045]: I1125 23:30:19.873132 5045 scope.go:117] "RemoveContainer" containerID="e2f5b5fa1a31c1ed4a0841060c620289e8c2ed72d328c8fe54223016ffe240d0" Nov 25 23:30:19 crc kubenswrapper[5045]: I1125 23:30:19.936209 5045 scope.go:117] "RemoveContainer" containerID="e6e9d3aac4bd2915e49caed05c7b9dde8a2c55d5aea94ba697da6d3fca729ffb" Nov 25 23:30:19 crc kubenswrapper[5045]: I1125 23:30:19.989218 5045 scope.go:117] "RemoveContainer" containerID="5f906f9aa6637b72df37cd3da3698e1f39ff90b438c626f71a25fb8725a00638" Nov 25 23:30:25 crc kubenswrapper[5045]: E1125 23:30:25.219206 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial 
failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice/crio-9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:30:33 crc kubenswrapper[5045]: I1125 23:30:33.398234 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:30:33 crc kubenswrapper[5045]: I1125 23:30:33.869439 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"a33f156d04cc1e10a7f2c15e4d00ab8b26b2ed9c77c496c8385c52559bc0d4b3"} Nov 25 23:30:35 crc kubenswrapper[5045]: E1125 23:30:35.501881 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice/crio-9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:30:45 crc kubenswrapper[5045]: E1125 23:30:45.815885 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice/crio-9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c\": RecentStats: unable to find data in memory cache]" Nov 25 23:30:47 crc kubenswrapper[5045]: I1125 23:30:47.066620 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-kbnmb"] Nov 25 23:30:47 crc kubenswrapper[5045]: I1125 23:30:47.080646 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-kbnmb"] Nov 25 23:30:48 crc kubenswrapper[5045]: I1125 23:30:48.416228 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c385bb99-2964-4b34-b514-9d2c1a01f26d" path="/var/lib/kubelet/pods/c385bb99-2964-4b34-b514-9d2c1a01f26d/volumes" Nov 25 23:30:56 crc kubenswrapper[5045]: E1125 23:30:56.054360 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice/crio-9d7622fbfa64e11aa84d6215b1511b6b912a228a3571eb05734a7508b4b1ae9c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cd6d72_6e2a_433a_b0f7_dd5fa0a597cc.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:31:20 crc kubenswrapper[5045]: I1125 23:31:20.104049 5045 scope.go:117] "RemoveContainer" containerID="a7ac03776d3512855db8907eb27b209873d6c2af32991119985120a09547f792" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.594548 5045 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-d2252"] Nov 25 23:31:35 crc kubenswrapper[5045]: E1125 23:31:35.597084 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc" containerName="collect-profiles" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.597168 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc" containerName="collect-profiles" Nov 25 23:31:35 crc kubenswrapper[5045]: E1125 23:31:35.597342 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efb0b7f5-854d-495c-b19a-1fb1bcd60018" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.597359 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="efb0b7f5-854d-495c-b19a-1fb1bcd60018" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.598777 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc" containerName="collect-profiles" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.598884 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="efb0b7f5-854d-495c-b19a-1fb1bcd60018" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.603909 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d2252"] Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.603954 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.699504 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5mgk\" (UniqueName: \"kubernetes.io/projected/e7dd8048-b575-4763-a566-edfaff13d5dd-kube-api-access-j5mgk\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.699569 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-utilities\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.699954 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-catalog-content\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.802603 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5mgk\" (UniqueName: \"kubernetes.io/projected/e7dd8048-b575-4763-a566-edfaff13d5dd-kube-api-access-j5mgk\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.803151 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-utilities\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.803309 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-catalog-content\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.804239 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-utilities\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.804290 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-catalog-content\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.837212 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5mgk\" (UniqueName: \"kubernetes.io/projected/e7dd8048-b575-4763-a566-edfaff13d5dd-kube-api-access-j5mgk\") pod \"community-operators-d2252\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:35 crc kubenswrapper[5045]: I1125 23:31:35.937353 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:36 crc kubenswrapper[5045]: I1125 23:31:36.462215 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d2252"] Nov 25 23:31:36 crc kubenswrapper[5045]: W1125 23:31:36.467915 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode7dd8048_b575_4763_a566_edfaff13d5dd.slice/crio-f374d9b40e777c13b957fe74ee0a67d61cc2e052028130ee6a59e00f5b918027 WatchSource:0}: Error finding container f374d9b40e777c13b957fe74ee0a67d61cc2e052028130ee6a59e00f5b918027: Status 404 returned error can't find the container with id f374d9b40e777c13b957fe74ee0a67d61cc2e052028130ee6a59e00f5b918027 Nov 25 23:31:36 crc kubenswrapper[5045]: I1125 23:31:36.529594 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d2252" event={"ID":"e7dd8048-b575-4763-a566-edfaff13d5dd","Type":"ContainerStarted","Data":"f374d9b40e777c13b957fe74ee0a67d61cc2e052028130ee6a59e00f5b918027"} Nov 25 23:31:37 crc kubenswrapper[5045]: I1125 23:31:37.545339 5045 generic.go:334] "Generic (PLEG): container finished" podID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerID="da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f" exitCode=0 Nov 25 23:31:37 crc kubenswrapper[5045]: I1125 23:31:37.545412 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d2252" event={"ID":"e7dd8048-b575-4763-a566-edfaff13d5dd","Type":"ContainerDied","Data":"da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f"} Nov 25 23:31:39 crc kubenswrapper[5045]: I1125 23:31:39.571757 5045 generic.go:334] "Generic (PLEG): container finished" podID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerID="b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180" exitCode=0 Nov 25 23:31:39 crc kubenswrapper[5045]: I1125 23:31:39.571827 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d2252" event={"ID":"e7dd8048-b575-4763-a566-edfaff13d5dd","Type":"ContainerDied","Data":"b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180"} Nov 25 23:31:41 crc kubenswrapper[5045]: I1125 23:31:41.599448 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d2252" event={"ID":"e7dd8048-b575-4763-a566-edfaff13d5dd","Type":"ContainerStarted","Data":"56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90"} Nov 25 23:31:41 crc kubenswrapper[5045]: I1125 23:31:41.623513 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d2252" podStartSLOduration=3.810703535 podStartE2EDuration="6.623495888s" podCreationTimestamp="2025-11-25 23:31:35 +0000 UTC" firstStartedPulling="2025-11-25 23:31:37.549755058 +0000 UTC m=+1953.907414210" lastFinishedPulling="2025-11-25 23:31:40.362547411 +0000 UTC m=+1956.720206563" observedRunningTime="2025-11-25 23:31:41.622100628 +0000 UTC m=+1957.979759780" watchObservedRunningTime="2025-11-25 23:31:41.623495888 +0000 UTC m=+1957.981155020" Nov 25 23:31:45 crc kubenswrapper[5045]: I1125 23:31:45.938091 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:45 crc kubenswrapper[5045]: I1125 23:31:45.938977 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:46 crc kubenswrapper[5045]: I1125 23:31:46.026172 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:46 crc kubenswrapper[5045]: I1125 23:31:46.727706 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:46 crc kubenswrapper[5045]: I1125 23:31:46.810058 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d2252"] Nov 25 23:31:48 crc kubenswrapper[5045]: I1125 23:31:48.671484 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d2252" podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerName="registry-server" containerID="cri-o://56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90" gracePeriod=2 Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.220672 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.293857 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5mgk\" (UniqueName: \"kubernetes.io/projected/e7dd8048-b575-4763-a566-edfaff13d5dd-kube-api-access-j5mgk\") pod \"e7dd8048-b575-4763-a566-edfaff13d5dd\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.293929 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-utilities\") pod \"e7dd8048-b575-4763-a566-edfaff13d5dd\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.293971 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-catalog-content\") pod \"e7dd8048-b575-4763-a566-edfaff13d5dd\" (UID: \"e7dd8048-b575-4763-a566-edfaff13d5dd\") " Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.297204 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-utilities" (OuterVolumeSpecName: "utilities") pod "e7dd8048-b575-4763-a566-edfaff13d5dd" (UID: "e7dd8048-b575-4763-a566-edfaff13d5dd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.311193 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7dd8048-b575-4763-a566-edfaff13d5dd-kube-api-access-j5mgk" (OuterVolumeSpecName: "kube-api-access-j5mgk") pod "e7dd8048-b575-4763-a566-edfaff13d5dd" (UID: "e7dd8048-b575-4763-a566-edfaff13d5dd"). InnerVolumeSpecName "kube-api-access-j5mgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.361727 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e7dd8048-b575-4763-a566-edfaff13d5dd" (UID: "e7dd8048-b575-4763-a566-edfaff13d5dd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.401506 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5mgk\" (UniqueName: \"kubernetes.io/projected/e7dd8048-b575-4763-a566-edfaff13d5dd-kube-api-access-j5mgk\") on node \"crc\" DevicePath \"\"" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.401540 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.401550 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7dd8048-b575-4763-a566-edfaff13d5dd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.689132 5045 generic.go:334] "Generic (PLEG): container finished" podID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerID="56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90" exitCode=0 Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.689272 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d2252" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.689259 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d2252" event={"ID":"e7dd8048-b575-4763-a566-edfaff13d5dd","Type":"ContainerDied","Data":"56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90"} Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.689816 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d2252" event={"ID":"e7dd8048-b575-4763-a566-edfaff13d5dd","Type":"ContainerDied","Data":"f374d9b40e777c13b957fe74ee0a67d61cc2e052028130ee6a59e00f5b918027"} Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.689857 5045 scope.go:117] "RemoveContainer" containerID="56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.738304 5045 scope.go:117] "RemoveContainer" containerID="b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.748912 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d2252"] Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.761574 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d2252"] Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.778307 5045 scope.go:117] "RemoveContainer" containerID="da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.842573 5045 scope.go:117] "RemoveContainer" containerID="56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90" Nov 25 23:31:49 crc kubenswrapper[5045]: E1125 23:31:49.843299 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90\": container with ID starting with 56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90 not found: ID does not exist" containerID="56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.843396 
5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90"} err="failed to get container status \"56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90\": rpc error: code = NotFound desc = could not find container \"56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90\": container with ID starting with 56414128f8746fe79b62dc597d7437e518d4e46b18a6b71e0d90ec16daf1cc90 not found: ID does not exist" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.843446 5045 scope.go:117] "RemoveContainer" containerID="b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180" Nov 25 23:31:49 crc kubenswrapper[5045]: E1125 23:31:49.844405 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180\": container with ID starting with b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180 not found: ID does not exist" containerID="b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.844493 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180"} err="failed to get container status \"b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180\": rpc error: code = NotFound desc = could not find container \"b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180\": container with ID starting with b962800f80926997168e7e781dde81efb23ea8d71597d51e8a224a207e4b3180 not found: ID does not exist" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.844553 5045 scope.go:117] "RemoveContainer" containerID="da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f" Nov 25 23:31:49 crc kubenswrapper[5045]: E1125 23:31:49.845045 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f\": container with ID starting with da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f not found: ID does not exist" containerID="da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f" Nov 25 23:31:49 crc kubenswrapper[5045]: I1125 23:31:49.845113 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f"} err="failed to get container status \"da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f\": rpc error: code = NotFound desc = could not find container \"da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f\": container with ID starting with da8c4d62f66f10ebc84cfd69855b06368d8f19fe483b058dd6e5c7ae4817535f not found: ID does not exist" Nov 25 23:31:50 crc kubenswrapper[5045]: I1125 23:31:50.415775 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" path="/var/lib/kubelet/pods/e7dd8048-b575-4763-a566-edfaff13d5dd/volumes" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.289774 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q4cc8"] Nov 25 23:31:55 crc kubenswrapper[5045]: E1125 23:31:55.290700 5045 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerName="extract-content" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.290729 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerName="extract-content" Nov 25 23:31:55 crc kubenswrapper[5045]: E1125 23:31:55.290752 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerName="extract-utilities" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.290758 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerName="extract-utilities" Nov 25 23:31:55 crc kubenswrapper[5045]: E1125 23:31:55.290776 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerName="registry-server" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.290782 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerName="registry-server" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.290949 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7dd8048-b575-4763-a566-edfaff13d5dd" containerName="registry-server" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.292092 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.307418 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q4cc8"] Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.351419 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z79vc\" (UniqueName: \"kubernetes.io/projected/1718512c-1af1-4fef-adeb-386ed42b3154-kube-api-access-z79vc\") pod \"redhat-operators-q4cc8\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.351888 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-catalog-content\") pod \"redhat-operators-q4cc8\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.352064 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-utilities\") pod \"redhat-operators-q4cc8\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.454329 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z79vc\" (UniqueName: \"kubernetes.io/projected/1718512c-1af1-4fef-adeb-386ed42b3154-kube-api-access-z79vc\") pod \"redhat-operators-q4cc8\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.454472 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-catalog-content\") pod \"redhat-operators-q4cc8\" 
(UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.454552 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-utilities\") pod \"redhat-operators-q4cc8\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.454992 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-catalog-content\") pod \"redhat-operators-q4cc8\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.455267 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-utilities\") pod \"redhat-operators-q4cc8\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.477984 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z79vc\" (UniqueName: \"kubernetes.io/projected/1718512c-1af1-4fef-adeb-386ed42b3154-kube-api-access-z79vc\") pod \"redhat-operators-q4cc8\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:55 crc kubenswrapper[5045]: I1125 23:31:55.613116 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:31:56 crc kubenswrapper[5045]: I1125 23:31:56.052129 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q4cc8"] Nov 25 23:31:56 crc kubenswrapper[5045]: I1125 23:31:56.785354 5045 generic.go:334] "Generic (PLEG): container finished" podID="1718512c-1af1-4fef-adeb-386ed42b3154" containerID="ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed" exitCode=0 Nov 25 23:31:56 crc kubenswrapper[5045]: I1125 23:31:56.785441 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4cc8" event={"ID":"1718512c-1af1-4fef-adeb-386ed42b3154","Type":"ContainerDied","Data":"ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed"} Nov 25 23:31:56 crc kubenswrapper[5045]: I1125 23:31:56.785757 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4cc8" event={"ID":"1718512c-1af1-4fef-adeb-386ed42b3154","Type":"ContainerStarted","Data":"ec5836053e4bc08f5667cecb5c26f7768ccc283dfba5dc1e926a7c5e99e27c76"} Nov 25 23:31:57 crc kubenswrapper[5045]: I1125 23:31:57.803596 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4cc8" event={"ID":"1718512c-1af1-4fef-adeb-386ed42b3154","Type":"ContainerStarted","Data":"08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d"} Nov 25 23:31:59 crc kubenswrapper[5045]: I1125 23:31:59.828877 5045 generic.go:334] "Generic (PLEG): container finished" podID="1718512c-1af1-4fef-adeb-386ed42b3154" containerID="08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d" exitCode=0 Nov 25 23:31:59 crc kubenswrapper[5045]: I1125 23:31:59.828953 5045 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4cc8" event={"ID":"1718512c-1af1-4fef-adeb-386ed42b3154","Type":"ContainerDied","Data":"08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d"} Nov 25 23:32:00 crc kubenswrapper[5045]: I1125 23:32:00.846016 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4cc8" event={"ID":"1718512c-1af1-4fef-adeb-386ed42b3154","Type":"ContainerStarted","Data":"4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f"} Nov 25 23:32:00 crc kubenswrapper[5045]: I1125 23:32:00.880037 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q4cc8" podStartSLOduration=2.415670034 podStartE2EDuration="5.880016115s" podCreationTimestamp="2025-11-25 23:31:55 +0000 UTC" firstStartedPulling="2025-11-25 23:31:56.787465427 +0000 UTC m=+1973.145124549" lastFinishedPulling="2025-11-25 23:32:00.251811478 +0000 UTC m=+1976.609470630" observedRunningTime="2025-11-25 23:32:00.878543173 +0000 UTC m=+1977.236202295" watchObservedRunningTime="2025-11-25 23:32:00.880016115 +0000 UTC m=+1977.237675237" Nov 25 23:32:05 crc kubenswrapper[5045]: I1125 23:32:05.614169 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:32:05 crc kubenswrapper[5045]: I1125 23:32:05.616698 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:32:06 crc kubenswrapper[5045]: I1125 23:32:06.664968 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q4cc8" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="registry-server" probeResult="failure" output=< Nov 25 23:32:06 crc kubenswrapper[5045]: timeout: failed to connect service ":50051" within 1s Nov 25 23:32:06 crc kubenswrapper[5045]: > Nov 25 23:32:15 crc kubenswrapper[5045]: I1125 23:32:15.694745 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:32:15 crc kubenswrapper[5045]: I1125 23:32:15.775736 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:32:15 crc kubenswrapper[5045]: I1125 23:32:15.950818 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q4cc8"] Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.043046 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q4cc8" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="registry-server" containerID="cri-o://4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f" gracePeriod=2 Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.509521 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.525455 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-utilities\") pod \"1718512c-1af1-4fef-adeb-386ed42b3154\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.525528 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-catalog-content\") pod \"1718512c-1af1-4fef-adeb-386ed42b3154\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.525739 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z79vc\" (UniqueName: \"kubernetes.io/projected/1718512c-1af1-4fef-adeb-386ed42b3154-kube-api-access-z79vc\") pod \"1718512c-1af1-4fef-adeb-386ed42b3154\" (UID: \"1718512c-1af1-4fef-adeb-386ed42b3154\") " Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.531188 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-utilities" (OuterVolumeSpecName: "utilities") pod "1718512c-1af1-4fef-adeb-386ed42b3154" (UID: "1718512c-1af1-4fef-adeb-386ed42b3154"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.542908 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1718512c-1af1-4fef-adeb-386ed42b3154-kube-api-access-z79vc" (OuterVolumeSpecName: "kube-api-access-z79vc") pod "1718512c-1af1-4fef-adeb-386ed42b3154" (UID: "1718512c-1af1-4fef-adeb-386ed42b3154"). InnerVolumeSpecName "kube-api-access-z79vc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.627767 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.627798 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z79vc\" (UniqueName: \"kubernetes.io/projected/1718512c-1af1-4fef-adeb-386ed42b3154-kube-api-access-z79vc\") on node \"crc\" DevicePath \"\"" Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.656533 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1718512c-1af1-4fef-adeb-386ed42b3154" (UID: "1718512c-1af1-4fef-adeb-386ed42b3154"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:32:17 crc kubenswrapper[5045]: I1125 23:32:17.730342 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1718512c-1af1-4fef-adeb-386ed42b3154-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.113921 5045 generic.go:334] "Generic (PLEG): container finished" podID="1718512c-1af1-4fef-adeb-386ed42b3154" containerID="4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f" exitCode=0 Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.114335 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4cc8" event={"ID":"1718512c-1af1-4fef-adeb-386ed42b3154","Type":"ContainerDied","Data":"4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f"} Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.114373 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4cc8" event={"ID":"1718512c-1af1-4fef-adeb-386ed42b3154","Type":"ContainerDied","Data":"ec5836053e4bc08f5667cecb5c26f7768ccc283dfba5dc1e926a7c5e99e27c76"} Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.114399 5045 scope.go:117] "RemoveContainer" containerID="4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.114608 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4cc8" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.187666 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q4cc8"] Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.206907 5045 scope.go:117] "RemoveContainer" containerID="08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.210345 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q4cc8"] Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.236859 5045 scope.go:117] "RemoveContainer" containerID="ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.277340 5045 scope.go:117] "RemoveContainer" containerID="4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f" Nov 25 23:32:18 crc kubenswrapper[5045]: E1125 23:32:18.277814 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f\": container with ID starting with 4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f not found: ID does not exist" containerID="4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.277854 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f"} err="failed to get container status \"4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f\": rpc error: code = NotFound desc = could not find container \"4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f\": container with ID starting with 4ed951ceb6d8e932ed4ad8a2c7fd54d7a5eb3be6394df68563316d3ec5ec544f not found: ID does not exist" Nov 25 23:32:18 crc 
kubenswrapper[5045]: I1125 23:32:18.277880 5045 scope.go:117] "RemoveContainer" containerID="08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d" Nov 25 23:32:18 crc kubenswrapper[5045]: E1125 23:32:18.278179 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d\": container with ID starting with 08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d not found: ID does not exist" containerID="08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.278202 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d"} err="failed to get container status \"08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d\": rpc error: code = NotFound desc = could not find container \"08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d\": container with ID starting with 08c65009257582fcd5f50fbee6b65b0c47cdfde90275d35df1751bdb370a6e9d not found: ID does not exist" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.278216 5045 scope.go:117] "RemoveContainer" containerID="ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed" Nov 25 23:32:18 crc kubenswrapper[5045]: E1125 23:32:18.278847 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed\": container with ID starting with ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed not found: ID does not exist" containerID="ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.278888 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed"} err="failed to get container status \"ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed\": rpc error: code = NotFound desc = could not find container \"ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed\": container with ID starting with ed6c2cfa59f2698cd21f1104ef03b7955ab603f2af5337fa78d9a10c30b639ed not found: ID does not exist" Nov 25 23:32:18 crc kubenswrapper[5045]: E1125 23:32:18.344324 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1718512c_1af1_4fef_adeb_386ed42b3154.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1718512c_1af1_4fef_adeb_386ed42b3154.slice/crio-ec5836053e4bc08f5667cecb5c26f7768ccc283dfba5dc1e926a7c5e99e27c76\": RecentStats: unable to find data in memory cache]" Nov 25 23:32:18 crc kubenswrapper[5045]: I1125 23:32:18.407376 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" path="/var/lib/kubelet/pods/1718512c-1af1-4fef-adeb-386ed42b3154/volumes" Nov 25 23:33:00 crc kubenswrapper[5045]: I1125 23:33:00.541230 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:33:00 crc kubenswrapper[5045]: I1125 23:33:00.541823 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:33:30 crc kubenswrapper[5045]: I1125 23:33:30.540835 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:33:30 crc kubenswrapper[5045]: I1125 23:33:30.541598 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.526981 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ngpt7"] Nov 25 23:33:38 crc kubenswrapper[5045]: E1125 23:33:38.528550 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="extract-utilities" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.528575 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="extract-utilities" Nov 25 23:33:38 crc kubenswrapper[5045]: E1125 23:33:38.528606 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="extract-content" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.528619 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="extract-content" Nov 25 23:33:38 crc kubenswrapper[5045]: E1125 23:33:38.528661 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="registry-server" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.528678 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="registry-server" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.529030 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="1718512c-1af1-4fef-adeb-386ed42b3154" containerName="registry-server" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.532990 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.542425 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngpt7"] Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.559263 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-utilities\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.560059 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-catalog-content\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.560392 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp5cs\" (UniqueName: \"kubernetes.io/projected/a1081ef2-bad7-4b60-a3d5-616b05e1228d-kube-api-access-gp5cs\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.661954 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp5cs\" (UniqueName: \"kubernetes.io/projected/a1081ef2-bad7-4b60-a3d5-616b05e1228d-kube-api-access-gp5cs\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.662022 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-utilities\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.662111 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-catalog-content\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.662756 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-utilities\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.662794 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-catalog-content\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.696828 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gp5cs\" (UniqueName: \"kubernetes.io/projected/a1081ef2-bad7-4b60-a3d5-616b05e1228d-kube-api-access-gp5cs\") pod \"redhat-marketplace-ngpt7\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:38 crc kubenswrapper[5045]: I1125 23:33:38.871170 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:39 crc kubenswrapper[5045]: I1125 23:33:39.406616 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngpt7"] Nov 25 23:33:39 crc kubenswrapper[5045]: W1125 23:33:39.421024 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1081ef2_bad7_4b60_a3d5_616b05e1228d.slice/crio-d2b8824c862bbcda22e1a3c767c8787f65f62dacc218dc45d46d5a1b0b8b244f WatchSource:0}: Error finding container d2b8824c862bbcda22e1a3c767c8787f65f62dacc218dc45d46d5a1b0b8b244f: Status 404 returned error can't find the container with id d2b8824c862bbcda22e1a3c767c8787f65f62dacc218dc45d46d5a1b0b8b244f Nov 25 23:33:40 crc kubenswrapper[5045]: I1125 23:33:40.063241 5045 generic.go:334] "Generic (PLEG): container finished" podID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerID="e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3" exitCode=0 Nov 25 23:33:40 crc kubenswrapper[5045]: I1125 23:33:40.063297 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngpt7" event={"ID":"a1081ef2-bad7-4b60-a3d5-616b05e1228d","Type":"ContainerDied","Data":"e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3"} Nov 25 23:33:40 crc kubenswrapper[5045]: I1125 23:33:40.063448 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngpt7" event={"ID":"a1081ef2-bad7-4b60-a3d5-616b05e1228d","Type":"ContainerStarted","Data":"d2b8824c862bbcda22e1a3c767c8787f65f62dacc218dc45d46d5a1b0b8b244f"} Nov 25 23:33:40 crc kubenswrapper[5045]: I1125 23:33:40.067281 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:33:41 crc kubenswrapper[5045]: I1125 23:33:41.077228 5045 generic.go:334] "Generic (PLEG): container finished" podID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerID="b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f" exitCode=0 Nov 25 23:33:41 crc kubenswrapper[5045]: I1125 23:33:41.077341 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngpt7" event={"ID":"a1081ef2-bad7-4b60-a3d5-616b05e1228d","Type":"ContainerDied","Data":"b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f"} Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.089698 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngpt7" event={"ID":"a1081ef2-bad7-4b60-a3d5-616b05e1228d","Type":"ContainerStarted","Data":"92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597"} Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.115627 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ngpt7" podStartSLOduration=2.522021189 podStartE2EDuration="4.115610379s" podCreationTimestamp="2025-11-25 23:33:38 +0000 UTC" firstStartedPulling="2025-11-25 23:33:40.066626252 +0000 UTC m=+2076.424285394" 
lastFinishedPulling="2025-11-25 23:33:41.660215442 +0000 UTC m=+2078.017874584" observedRunningTime="2025-11-25 23:33:42.109404392 +0000 UTC m=+2078.467063504" watchObservedRunningTime="2025-11-25 23:33:42.115610379 +0000 UTC m=+2078.473269491" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.693907 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8js75"] Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.696110 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.708768 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8js75"] Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.858562 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-utilities\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.858881 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-catalog-content\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.858940 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b45mb\" (UniqueName: \"kubernetes.io/projected/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-kube-api-access-b45mb\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.959985 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-utilities\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.960066 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-catalog-content\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.960130 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b45mb\" (UniqueName: \"kubernetes.io/projected/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-kube-api-access-b45mb\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.960742 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-catalog-content\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " 
pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.960699 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-utilities\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:42 crc kubenswrapper[5045]: I1125 23:33:42.979398 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b45mb\" (UniqueName: \"kubernetes.io/projected/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-kube-api-access-b45mb\") pod \"certified-operators-8js75\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:43 crc kubenswrapper[5045]: I1125 23:33:43.014780 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:43 crc kubenswrapper[5045]: W1125 23:33:43.551858 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8f3318d_bcda_413b_a4b9_1eb6d82c1131.slice/crio-6f4fd8d48509ac41a6a4167392baa377eba510da0112366c5b3a1876aaa9aca9 WatchSource:0}: Error finding container 6f4fd8d48509ac41a6a4167392baa377eba510da0112366c5b3a1876aaa9aca9: Status 404 returned error can't find the container with id 6f4fd8d48509ac41a6a4167392baa377eba510da0112366c5b3a1876aaa9aca9 Nov 25 23:33:43 crc kubenswrapper[5045]: I1125 23:33:43.562105 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8js75"] Nov 25 23:33:44 crc kubenswrapper[5045]: I1125 23:33:44.120129 5045 generic.go:334] "Generic (PLEG): container finished" podID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerID="3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da" exitCode=0 Nov 25 23:33:44 crc kubenswrapper[5045]: I1125 23:33:44.120195 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8js75" event={"ID":"f8f3318d-bcda-413b-a4b9-1eb6d82c1131","Type":"ContainerDied","Data":"3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da"} Nov 25 23:33:44 crc kubenswrapper[5045]: I1125 23:33:44.120434 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8js75" event={"ID":"f8f3318d-bcda-413b-a4b9-1eb6d82c1131","Type":"ContainerStarted","Data":"6f4fd8d48509ac41a6a4167392baa377eba510da0112366c5b3a1876aaa9aca9"} Nov 25 23:33:46 crc kubenswrapper[5045]: I1125 23:33:46.145457 5045 generic.go:334] "Generic (PLEG): container finished" podID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerID="9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3" exitCode=0 Nov 25 23:33:46 crc kubenswrapper[5045]: I1125 23:33:46.145569 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8js75" event={"ID":"f8f3318d-bcda-413b-a4b9-1eb6d82c1131","Type":"ContainerDied","Data":"9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3"} Nov 25 23:33:47 crc kubenswrapper[5045]: I1125 23:33:47.158331 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8js75" event={"ID":"f8f3318d-bcda-413b-a4b9-1eb6d82c1131","Type":"ContainerStarted","Data":"4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288"} Nov 25 23:33:47 
crc kubenswrapper[5045]: I1125 23:33:47.179021 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8js75" podStartSLOduration=2.62734617 podStartE2EDuration="5.178997656s" podCreationTimestamp="2025-11-25 23:33:42 +0000 UTC" firstStartedPulling="2025-11-25 23:33:44.121976843 +0000 UTC m=+2080.479635985" lastFinishedPulling="2025-11-25 23:33:46.673628349 +0000 UTC m=+2083.031287471" observedRunningTime="2025-11-25 23:33:47.176878442 +0000 UTC m=+2083.534537584" watchObservedRunningTime="2025-11-25 23:33:47.178997656 +0000 UTC m=+2083.536656788" Nov 25 23:33:48 crc kubenswrapper[5045]: I1125 23:33:48.872184 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:48 crc kubenswrapper[5045]: I1125 23:33:48.872644 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:48 crc kubenswrapper[5045]: I1125 23:33:48.955749 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:49 crc kubenswrapper[5045]: I1125 23:33:49.262385 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:50 crc kubenswrapper[5045]: I1125 23:33:50.079397 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngpt7"] Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.206299 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ngpt7" podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerName="registry-server" containerID="cri-o://92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597" gracePeriod=2 Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.742778 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.784287 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gp5cs\" (UniqueName: \"kubernetes.io/projected/a1081ef2-bad7-4b60-a3d5-616b05e1228d-kube-api-access-gp5cs\") pod \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.784487 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-catalog-content\") pod \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.784557 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-utilities\") pod \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\" (UID: \"a1081ef2-bad7-4b60-a3d5-616b05e1228d\") " Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.785904 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-utilities" (OuterVolumeSpecName: "utilities") pod "a1081ef2-bad7-4b60-a3d5-616b05e1228d" (UID: "a1081ef2-bad7-4b60-a3d5-616b05e1228d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.792150 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1081ef2-bad7-4b60-a3d5-616b05e1228d-kube-api-access-gp5cs" (OuterVolumeSpecName: "kube-api-access-gp5cs") pod "a1081ef2-bad7-4b60-a3d5-616b05e1228d" (UID: "a1081ef2-bad7-4b60-a3d5-616b05e1228d"). InnerVolumeSpecName "kube-api-access-gp5cs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.801161 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a1081ef2-bad7-4b60-a3d5-616b05e1228d" (UID: "a1081ef2-bad7-4b60-a3d5-616b05e1228d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.886883 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.886982 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gp5cs\" (UniqueName: \"kubernetes.io/projected/a1081ef2-bad7-4b60-a3d5-616b05e1228d-kube-api-access-gp5cs\") on node \"crc\" DevicePath \"\"" Nov 25 23:33:51 crc kubenswrapper[5045]: I1125 23:33:51.887005 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1081ef2-bad7-4b60-a3d5-616b05e1228d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.226422 5045 generic.go:334] "Generic (PLEG): container finished" podID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerID="92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597" exitCode=0 Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.226470 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngpt7" event={"ID":"a1081ef2-bad7-4b60-a3d5-616b05e1228d","Type":"ContainerDied","Data":"92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597"} Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.226507 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngpt7" event={"ID":"a1081ef2-bad7-4b60-a3d5-616b05e1228d","Type":"ContainerDied","Data":"d2b8824c862bbcda22e1a3c767c8787f65f62dacc218dc45d46d5a1b0b8b244f"} Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.226553 5045 scope.go:117] "RemoveContainer" containerID="92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.226583 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ngpt7" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.260460 5045 scope.go:117] "RemoveContainer" containerID="b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.306461 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngpt7"] Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.316414 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngpt7"] Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.319866 5045 scope.go:117] "RemoveContainer" containerID="e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.373035 5045 scope.go:117] "RemoveContainer" containerID="92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597" Nov 25 23:33:52 crc kubenswrapper[5045]: E1125 23:33:52.373496 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597\": container with ID starting with 92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597 not found: ID does not exist" containerID="92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.373531 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597"} err="failed to get container status \"92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597\": rpc error: code = NotFound desc = could not find container \"92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597\": container with ID starting with 92b94acfe09677ff1809efabda5b1ceb21fab135cbd0b77ef91f736bc0aa5597 not found: ID does not exist" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.373551 5045 scope.go:117] "RemoveContainer" containerID="b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f" Nov 25 23:33:52 crc kubenswrapper[5045]: E1125 23:33:52.374017 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f\": container with ID starting with b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f not found: ID does not exist" containerID="b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.374231 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f"} err="failed to get container status \"b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f\": rpc error: code = NotFound desc = could not find container \"b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f\": container with ID starting with b7802d5a10b07797c1ec1532a8dcb9c58263711048bfe6746058cff9510eeb3f not found: ID does not exist" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.374433 5045 scope.go:117] "RemoveContainer" containerID="e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3" Nov 25 23:33:52 crc kubenswrapper[5045]: E1125 23:33:52.375158 5045 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3\": container with ID starting with e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3 not found: ID does not exist" containerID="e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.375183 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3"} err="failed to get container status \"e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3\": rpc error: code = NotFound desc = could not find container \"e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3\": container with ID starting with e7e4dc143cb6dc7fcf79c730bc266e2f556bd66dd8a7025a1c9a335445874ea3 not found: ID does not exist" Nov 25 23:33:52 crc kubenswrapper[5045]: I1125 23:33:52.415403 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" path="/var/lib/kubelet/pods/a1081ef2-bad7-4b60-a3d5-616b05e1228d/volumes" Nov 25 23:33:53 crc kubenswrapper[5045]: I1125 23:33:53.015859 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:53 crc kubenswrapper[5045]: I1125 23:33:53.016365 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:53 crc kubenswrapper[5045]: I1125 23:33:53.094382 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:53 crc kubenswrapper[5045]: I1125 23:33:53.341066 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:55 crc kubenswrapper[5045]: I1125 23:33:55.486265 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8js75"] Nov 25 23:33:55 crc kubenswrapper[5045]: I1125 23:33:55.486867 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8js75" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerName="registry-server" containerID="cri-o://4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288" gracePeriod=2 Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.019268 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.088975 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-catalog-content\") pod \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.089036 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b45mb\" (UniqueName: \"kubernetes.io/projected/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-kube-api-access-b45mb\") pod \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.089204 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-utilities\") pod \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\" (UID: \"f8f3318d-bcda-413b-a4b9-1eb6d82c1131\") " Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.090085 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-utilities" (OuterVolumeSpecName: "utilities") pod "f8f3318d-bcda-413b-a4b9-1eb6d82c1131" (UID: "f8f3318d-bcda-413b-a4b9-1eb6d82c1131"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.090730 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.098979 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-kube-api-access-b45mb" (OuterVolumeSpecName: "kube-api-access-b45mb") pod "f8f3318d-bcda-413b-a4b9-1eb6d82c1131" (UID: "f8f3318d-bcda-413b-a4b9-1eb6d82c1131"). InnerVolumeSpecName "kube-api-access-b45mb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.192447 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b45mb\" (UniqueName: \"kubernetes.io/projected/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-kube-api-access-b45mb\") on node \"crc\" DevicePath \"\"" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.275610 5045 generic.go:334] "Generic (PLEG): container finished" podID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerID="4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288" exitCode=0 Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.275673 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8js75" event={"ID":"f8f3318d-bcda-413b-a4b9-1eb6d82c1131","Type":"ContainerDied","Data":"4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288"} Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.275746 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8js75" event={"ID":"f8f3318d-bcda-413b-a4b9-1eb6d82c1131","Type":"ContainerDied","Data":"6f4fd8d48509ac41a6a4167392baa377eba510da0112366c5b3a1876aaa9aca9"} Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.275770 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8js75" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.275799 5045 scope.go:117] "RemoveContainer" containerID="4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.324030 5045 scope.go:117] "RemoveContainer" containerID="9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.360211 5045 scope.go:117] "RemoveContainer" containerID="3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.420003 5045 scope.go:117] "RemoveContainer" containerID="4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288" Nov 25 23:33:56 crc kubenswrapper[5045]: E1125 23:33:56.420583 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288\": container with ID starting with 4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288 not found: ID does not exist" containerID="4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.420628 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288"} err="failed to get container status \"4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288\": rpc error: code = NotFound desc = could not find container \"4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288\": container with ID starting with 4ca48daa6364de1523a6de406848733d1da086fc72a10f39f42b9000f40ef288 not found: ID does not exist" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.420658 5045 scope.go:117] "RemoveContainer" containerID="9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3" Nov 25 23:33:56 crc kubenswrapper[5045]: E1125 23:33:56.421073 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3\": container with ID starting with 9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3 not found: ID does not exist" containerID="9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.421115 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3"} err="failed to get container status \"9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3\": rpc error: code = NotFound desc = could not find container \"9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3\": container with ID starting with 9dde6b068dc116ae5595c34aaaf44ebe2d6b47c988be0fe2d38b040cd1b1e7d3 not found: ID does not exist" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.421142 5045 scope.go:117] "RemoveContainer" containerID="3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da" Nov 25 23:33:56 crc kubenswrapper[5045]: E1125 23:33:56.421468 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da\": container with ID starting with 3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da not found: ID does not exist" containerID="3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.421501 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da"} err="failed to get container status \"3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da\": rpc error: code = NotFound desc = could not find container \"3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da\": container with ID starting with 3ba353ce42fac7ba6c9884d29303cdc86684a557516319fc9580fddbf99eb6da not found: ID does not exist" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.520142 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f8f3318d-bcda-413b-a4b9-1eb6d82c1131" (UID: "f8f3318d-bcda-413b-a4b9-1eb6d82c1131"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.600211 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8f3318d-bcda-413b-a4b9-1eb6d82c1131-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.651693 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8js75"] Nov 25 23:33:56 crc kubenswrapper[5045]: I1125 23:33:56.664515 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8js75"] Nov 25 23:33:58 crc kubenswrapper[5045]: I1125 23:33:58.414426 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" path="/var/lib/kubelet/pods/f8f3318d-bcda-413b-a4b9-1eb6d82c1131/volumes" Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.126669 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.135142 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.145534 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.154530 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.160928 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-h4tbd"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.166756 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5mrhd"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.172482 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.180543 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.195604 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.202354 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-nxg74"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.209091 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.214775 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-bffvl"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.220087 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xqht7"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.225269 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wjdn8"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.230386 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.236822 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-q9c2m"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.245134 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-xg7wc"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.250486 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6sv8m"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.255772 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xqht7"] Nov 25 23:33:59 crc kubenswrapper[5045]: I1125 23:33:59.261364 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-w5d4p"] Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.414663 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12c286e1-8a5e-4427-aa87-819186e68dc4" path="/var/lib/kubelet/pods/12c286e1-8a5e-4427-aa87-819186e68dc4/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.415475 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1abc767d-c2dd-4500-96ed-92f1dbf3d327" path="/var/lib/kubelet/pods/1abc767d-c2dd-4500-96ed-92f1dbf3d327/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.416229 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42a18661-c8fa-41a6-8480-5547871c51d6" path="/var/lib/kubelet/pods/42a18661-c8fa-41a6-8480-5547871c51d6/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.416947 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53770c81-187b-44d6-b05a-887e6683232f" path="/var/lib/kubelet/pods/53770c81-187b-44d6-b05a-887e6683232f/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.418250 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="579827b1-ab24-4853-8733-d9ec6d9e81f0" path="/var/lib/kubelet/pods/579827b1-ab24-4853-8733-d9ec6d9e81f0/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.418919 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bde7529-f01b-4aaa-9fe3-1aa5341fad62" path="/var/lib/kubelet/pods/6bde7529-f01b-4aaa-9fe3-1aa5341fad62/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.419536 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9887c8c6-b00b-4415-b655-c127f62e3474" path="/var/lib/kubelet/pods/9887c8c6-b00b-4415-b655-c127f62e3474/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.420852 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98cd627c-ae90-4321-8046-ca0356bb9b7c" path="/var/lib/kubelet/pods/98cd627c-ae90-4321-8046-ca0356bb9b7c/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.421483 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3ed9cbb-dcd0-40c2-87b5-57087c5e5855" path="/var/lib/kubelet/pods/d3ed9cbb-dcd0-40c2-87b5-57087c5e5855/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.422120 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="efb0b7f5-854d-495c-b19a-1fb1bcd60018" path="/var/lib/kubelet/pods/efb0b7f5-854d-495c-b19a-1fb1bcd60018/volumes" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.540936 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.541029 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.541098 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.542278 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a33f156d04cc1e10a7f2c15e4d00ab8b26b2ed9c77c496c8385c52559bc0d4b3"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:34:00 crc kubenswrapper[5045]: I1125 23:34:00.542422 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://a33f156d04cc1e10a7f2c15e4d00ab8b26b2ed9c77c496c8385c52559bc0d4b3" gracePeriod=600 Nov 25 23:34:01 crc kubenswrapper[5045]: I1125 23:34:01.332335 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="a33f156d04cc1e10a7f2c15e4d00ab8b26b2ed9c77c496c8385c52559bc0d4b3" exitCode=0 Nov 25 23:34:01 crc kubenswrapper[5045]: I1125 23:34:01.332878 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"a33f156d04cc1e10a7f2c15e4d00ab8b26b2ed9c77c496c8385c52559bc0d4b3"} Nov 25 23:34:01 crc kubenswrapper[5045]: I1125 23:34:01.333016 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5"} Nov 25 23:34:01 crc kubenswrapper[5045]: I1125 23:34:01.333051 5045 scope.go:117] "RemoveContainer" containerID="74594b03a605a24cb1cef9f83d37b8f6da81ed030a5d20c4e359a05da5dae708" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.064479 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr"] Nov 25 23:34:05 crc kubenswrapper[5045]: E1125 23:34:05.065583 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerName="extract-utilities" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.065604 5045 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerName="extract-utilities" Nov 25 23:34:05 crc kubenswrapper[5045]: E1125 23:34:05.065627 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerName="registry-server" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.065641 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerName="registry-server" Nov 25 23:34:05 crc kubenswrapper[5045]: E1125 23:34:05.065678 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerName="registry-server" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.065690 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerName="registry-server" Nov 25 23:34:05 crc kubenswrapper[5045]: E1125 23:34:05.065736 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerName="extract-content" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.065748 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerName="extract-content" Nov 25 23:34:05 crc kubenswrapper[5045]: E1125 23:34:05.065773 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerName="extract-content" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.065784 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerName="extract-content" Nov 25 23:34:05 crc kubenswrapper[5045]: E1125 23:34:05.065808 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerName="extract-utilities" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.065820 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerName="extract-utilities" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.066095 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1081ef2-bad7-4b60-a3d5-616b05e1228d" containerName="registry-server" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.066152 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8f3318d-bcda-413b-a4b9-1eb6d82c1131" containerName="registry-server" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.067527 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.071503 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.072971 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.073071 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.073117 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.073570 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.079962 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmlzz\" (UniqueName: \"kubernetes.io/projected/06f796e9-76e4-4067-915d-4efd6e38226a-kube-api-access-fmlzz\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.080051 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.080154 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.080212 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.080249 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.082206 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr"] Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.182834 5045 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-fmlzz\" (UniqueName: \"kubernetes.io/projected/06f796e9-76e4-4067-915d-4efd6e38226a-kube-api-access-fmlzz\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.182930 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.183006 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.183058 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.183090 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.192369 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.192413 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.192990 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.197021 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.204908 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmlzz\" (UniqueName: \"kubernetes.io/projected/06f796e9-76e4-4067-915d-4efd6e38226a-kube-api-access-fmlzz\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.401783 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:05 crc kubenswrapper[5045]: I1125 23:34:05.768065 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr"] Nov 25 23:34:06 crc kubenswrapper[5045]: I1125 23:34:06.384528 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" event={"ID":"06f796e9-76e4-4067-915d-4efd6e38226a","Type":"ContainerStarted","Data":"c85a08a9d09ee14ffd51c2390794455def489112d05fd6bb959d73594fdb4744"} Nov 25 23:34:07 crc kubenswrapper[5045]: I1125 23:34:07.398477 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" event={"ID":"06f796e9-76e4-4067-915d-4efd6e38226a","Type":"ContainerStarted","Data":"e333ceb2f90a1282b68499c3a85091fc3e3cfa9b0b5e8f2e85633601ed0b8082"} Nov 25 23:34:07 crc kubenswrapper[5045]: I1125 23:34:07.435975 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" podStartSLOduration=1.7919402180000001 podStartE2EDuration="2.435953827s" podCreationTimestamp="2025-11-25 23:34:05 +0000 UTC" firstStartedPulling="2025-11-25 23:34:05.77634135 +0000 UTC m=+2102.134000502" lastFinishedPulling="2025-11-25 23:34:06.420354959 +0000 UTC m=+2102.778014111" observedRunningTime="2025-11-25 23:34:07.42583005 +0000 UTC m=+2103.783489192" watchObservedRunningTime="2025-11-25 23:34:07.435953827 +0000 UTC m=+2103.793612969" Nov 25 23:34:18 crc kubenswrapper[5045]: I1125 23:34:18.520561 5045 generic.go:334] "Generic (PLEG): container finished" podID="06f796e9-76e4-4067-915d-4efd6e38226a" containerID="e333ceb2f90a1282b68499c3a85091fc3e3cfa9b0b5e8f2e85633601ed0b8082" exitCode=0 Nov 25 23:34:18 crc kubenswrapper[5045]: I1125 23:34:18.520708 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" event={"ID":"06f796e9-76e4-4067-915d-4efd6e38226a","Type":"ContainerDied","Data":"e333ceb2f90a1282b68499c3a85091fc3e3cfa9b0b5e8f2e85633601ed0b8082"} Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.100528 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.208622 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-repo-setup-combined-ca-bundle\") pod \"06f796e9-76e4-4067-915d-4efd6e38226a\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.208921 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ssh-key\") pod \"06f796e9-76e4-4067-915d-4efd6e38226a\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.209082 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmlzz\" (UniqueName: \"kubernetes.io/projected/06f796e9-76e4-4067-915d-4efd6e38226a-kube-api-access-fmlzz\") pod \"06f796e9-76e4-4067-915d-4efd6e38226a\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.209207 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-inventory\") pod \"06f796e9-76e4-4067-915d-4efd6e38226a\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.209972 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ceph\") pod \"06f796e9-76e4-4067-915d-4efd6e38226a\" (UID: \"06f796e9-76e4-4067-915d-4efd6e38226a\") " Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.215871 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "06f796e9-76e4-4067-915d-4efd6e38226a" (UID: "06f796e9-76e4-4067-915d-4efd6e38226a"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.220148 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06f796e9-76e4-4067-915d-4efd6e38226a-kube-api-access-fmlzz" (OuterVolumeSpecName: "kube-api-access-fmlzz") pod "06f796e9-76e4-4067-915d-4efd6e38226a" (UID: "06f796e9-76e4-4067-915d-4efd6e38226a"). InnerVolumeSpecName "kube-api-access-fmlzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.222339 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ceph" (OuterVolumeSpecName: "ceph") pod "06f796e9-76e4-4067-915d-4efd6e38226a" (UID: "06f796e9-76e4-4067-915d-4efd6e38226a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.257252 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-inventory" (OuterVolumeSpecName: "inventory") pod "06f796e9-76e4-4067-915d-4efd6e38226a" (UID: "06f796e9-76e4-4067-915d-4efd6e38226a"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.265033 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "06f796e9-76e4-4067-915d-4efd6e38226a" (UID: "06f796e9-76e4-4067-915d-4efd6e38226a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.282902 5045 scope.go:117] "RemoveContainer" containerID="66fad4c6427ab782a5d08fad5a3fde65cec2ada061559dc47c263f8f442a0d90" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.315221 5045 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.315566 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.315775 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmlzz\" (UniqueName: \"kubernetes.io/projected/06f796e9-76e4-4067-915d-4efd6e38226a-kube-api-access-fmlzz\") on node \"crc\" DevicePath \"\"" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.315940 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.316143 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/06f796e9-76e4-4067-915d-4efd6e38226a-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.461145 5045 scope.go:117] "RemoveContainer" containerID="45da6e7843b8cdd6105419c9647b589bea790db89ac473bc4e744eef583678ea" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.506741 5045 scope.go:117] "RemoveContainer" containerID="e415768329531e33ff2d9fb072313a5b67d9290f31d1561fc0e00dde0a65e2e8" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.547307 5045 scope.go:117] "RemoveContainer" containerID="933b07dddfb1228622c0f32e2f206a5c7170738dd6610e9f9c8a01313116310a" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.547630 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" event={"ID":"06f796e9-76e4-4067-915d-4efd6e38226a","Type":"ContainerDied","Data":"c85a08a9d09ee14ffd51c2390794455def489112d05fd6bb959d73594fdb4744"} Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.547679 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c85a08a9d09ee14ffd51c2390794455def489112d05fd6bb959d73594fdb4744" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.547786 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.610952 5045 scope.go:117] "RemoveContainer" containerID="1c4141d685bfa1ed55039a7a3f1807cc078385db7c9bef89942c6f21ac928cfc" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.634967 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"] Nov 25 23:34:20 crc kubenswrapper[5045]: E1125 23:34:20.635396 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06f796e9-76e4-4067-915d-4efd6e38226a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.635419 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="06f796e9-76e4-4067-915d-4efd6e38226a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.635678 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="06f796e9-76e4-4067-915d-4efd6e38226a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.636365 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.641238 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.641277 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.641522 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.641844 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.641949 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.666599 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"] Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.696927 5045 scope.go:117] "RemoveContainer" containerID="1dcbd0c8abc5213da0cb8ca807f387da17065211b920d538aa7dfd0fc4339ea3" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.831425 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.831499 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qszm8\" (UniqueName: \"kubernetes.io/projected/7301c52a-3ce7-478e-867e-6f458de32f19-kube-api-access-qszm8\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.831807 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.831863 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.938129 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qszm8\" (UniqueName: \"kubernetes.io/projected/7301c52a-3ce7-478e-867e-6f458de32f19-kube-api-access-qszm8\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.938255 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.938287 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.938324 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.938362 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.943796 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.943831 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.943980 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.944622 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.963616 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qszm8\" (UniqueName: \"kubernetes.io/projected/7301c52a-3ce7-478e-867e-6f458de32f19-kube-api-access-qszm8\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:20 crc kubenswrapper[5045]: I1125 23:34:20.972651 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Nov 25 23:34:21 crc kubenswrapper[5045]: I1125 23:34:21.324031 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"]
Nov 25 23:34:21 crc kubenswrapper[5045]: I1125 23:34:21.572132 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" event={"ID":"7301c52a-3ce7-478e-867e-6f458de32f19","Type":"ContainerStarted","Data":"a2922b6f948477ffd48f097671bc8f73670736f7c8f68db4e1a5dae7e606a18c"}
Nov 25 23:34:22 crc kubenswrapper[5045]: I1125 23:34:22.585407 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" event={"ID":"7301c52a-3ce7-478e-867e-6f458de32f19","Type":"ContainerStarted","Data":"77172b9a45979399461d577c7e816b52025177a8c21b4018a25367c48cc601af"}
Nov 25 23:34:22 crc kubenswrapper[5045]: I1125 23:34:22.617956 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" podStartSLOduration=1.96998983 podStartE2EDuration="2.617896008s" podCreationTimestamp="2025-11-25 23:34:20 +0000 UTC" firstStartedPulling="2025-11-25 23:34:21.334670372 +0000 UTC m=+2117.692329484" lastFinishedPulling="2025-11-25 23:34:21.98257651 +0000 UTC m=+2118.340235662" observedRunningTime="2025-11-25 23:34:22.608569891 +0000 UTC m=+2118.966229033" watchObservedRunningTime="2025-11-25 23:34:22.617896008 +0000 UTC m=+2118.975555160"
Nov 25 23:35:20 crc kubenswrapper[5045]: I1125 23:35:20.887215 5045 scope.go:117] "RemoveContainer" containerID="befbfcea73604b99a29128a970a7df2aa233646d32e71e6fd99e2479d3b02558"
Nov 25 23:36:00 crc kubenswrapper[5045]: I1125 23:36:00.540956 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 23:36:00 crc kubenswrapper[5045]: I1125 23:36:00.543048 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 23:36:01 crc kubenswrapper[5045]: I1125 23:36:01.617879 5045 generic.go:334] "Generic (PLEG): container finished" podID="7301c52a-3ce7-478e-867e-6f458de32f19" containerID="77172b9a45979399461d577c7e816b52025177a8c21b4018a25367c48cc601af" exitCode=0
Nov 25 23:36:01 crc kubenswrapper[5045]: I1125 23:36:01.617996 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" event={"ID":"7301c52a-3ce7-478e-867e-6f458de32f19","Type":"ContainerDied","Data":"77172b9a45979399461d577c7e816b52025177a8c21b4018a25367c48cc601af"}
Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.176724 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv"
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.247013 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qszm8\" (UniqueName: \"kubernetes.io/projected/7301c52a-3ce7-478e-867e-6f458de32f19-kube-api-access-qszm8\") pod \"7301c52a-3ce7-478e-867e-6f458de32f19\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.247089 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-bootstrap-combined-ca-bundle\") pod \"7301c52a-3ce7-478e-867e-6f458de32f19\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.247169 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-inventory\") pod \"7301c52a-3ce7-478e-867e-6f458de32f19\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.247211 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ceph\") pod \"7301c52a-3ce7-478e-867e-6f458de32f19\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.247347 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ssh-key\") pod \"7301c52a-3ce7-478e-867e-6f458de32f19\" (UID: \"7301c52a-3ce7-478e-867e-6f458de32f19\") " Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.253304 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ceph" (OuterVolumeSpecName: "ceph") pod "7301c52a-3ce7-478e-867e-6f458de32f19" (UID: "7301c52a-3ce7-478e-867e-6f458de32f19"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.253894 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "7301c52a-3ce7-478e-867e-6f458de32f19" (UID: "7301c52a-3ce7-478e-867e-6f458de32f19"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.254941 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7301c52a-3ce7-478e-867e-6f458de32f19-kube-api-access-qszm8" (OuterVolumeSpecName: "kube-api-access-qszm8") pod "7301c52a-3ce7-478e-867e-6f458de32f19" (UID: "7301c52a-3ce7-478e-867e-6f458de32f19"). InnerVolumeSpecName "kube-api-access-qszm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.280464 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7301c52a-3ce7-478e-867e-6f458de32f19" (UID: "7301c52a-3ce7-478e-867e-6f458de32f19"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.280686 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-inventory" (OuterVolumeSpecName: "inventory") pod "7301c52a-3ce7-478e-867e-6f458de32f19" (UID: "7301c52a-3ce7-478e-867e-6f458de32f19"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.350454 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.350492 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.350501 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.350510 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qszm8\" (UniqueName: \"kubernetes.io/projected/7301c52a-3ce7-478e-867e-6f458de32f19-kube-api-access-qszm8\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.350524 5045 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301c52a-3ce7-478e-867e-6f458de32f19-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.643490 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" event={"ID":"7301c52a-3ce7-478e-867e-6f458de32f19","Type":"ContainerDied","Data":"a2922b6f948477ffd48f097671bc8f73670736f7c8f68db4e1a5dae7e606a18c"} Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.643865 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2922b6f948477ffd48f097671bc8f73670736f7c8f68db4e1a5dae7e606a18c" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.643585 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.803885 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb"] Nov 25 23:36:03 crc kubenswrapper[5045]: E1125 23:36:03.804450 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7301c52a-3ce7-478e-867e-6f458de32f19" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.804516 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7301c52a-3ce7-478e-867e-6f458de32f19" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.804755 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="7301c52a-3ce7-478e-867e-6f458de32f19" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.805360 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.807600 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.807680 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.808476 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.809351 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.810443 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.869055 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb"] Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.962517 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2td5\" (UniqueName: \"kubernetes.io/projected/c9631995-c169-41d0-90cb-9d1566919f23-kube-api-access-n2td5\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.963004 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.963201 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: 
\"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:03 crc kubenswrapper[5045]: I1125 23:36:03.963379 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.067309 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2td5\" (UniqueName: \"kubernetes.io/projected/c9631995-c169-41d0-90cb-9d1566919f23-kube-api-access-n2td5\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.067416 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.067470 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.067507 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.076953 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.077113 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.086491 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.093535 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2td5\" (UniqueName: \"kubernetes.io/projected/c9631995-c169-41d0-90cb-9d1566919f23-kube-api-access-n2td5\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.124030 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.452935 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb"] Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.656923 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" event={"ID":"c9631995-c169-41d0-90cb-9d1566919f23","Type":"ContainerStarted","Data":"4bae7506c90fcd846c57654e3fc3ae11caa3265856bd0747ff14f4eaedd63dca"} Nov 25 23:36:04 crc kubenswrapper[5045]: I1125 23:36:04.932857 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:36:05 crc kubenswrapper[5045]: I1125 23:36:05.672101 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" event={"ID":"c9631995-c169-41d0-90cb-9d1566919f23","Type":"ContainerStarted","Data":"b0df6116e4f25a21bd6b68490f5bcd5f0f75debb51fd29570358488a50f5b609"} Nov 25 23:36:05 crc kubenswrapper[5045]: I1125 23:36:05.715821 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" podStartSLOduration=2.247485619 podStartE2EDuration="2.715796707s" podCreationTimestamp="2025-11-25 23:36:03 +0000 UTC" firstStartedPulling="2025-11-25 23:36:04.461734908 +0000 UTC m=+2220.819394020" lastFinishedPulling="2025-11-25 23:36:04.930045996 +0000 UTC m=+2221.287705108" observedRunningTime="2025-11-25 23:36:05.703749157 +0000 UTC m=+2222.061408319" watchObservedRunningTime="2025-11-25 23:36:05.715796707 +0000 UTC m=+2222.073455849" Nov 25 23:36:21 crc kubenswrapper[5045]: I1125 23:36:20.999429 5045 scope.go:117] "RemoveContainer" containerID="bc530f20d14c755c27c894f1d9eeb6f554e24bdf576583bef6e876b8c45e89b8" Nov 25 23:36:21 crc kubenswrapper[5045]: I1125 23:36:21.058091 5045 scope.go:117] "RemoveContainer" containerID="fa2ae70d3e092c430dc865e69c53b82de73489acf8306eb68fb2b8ae74d4c9e4" Nov 25 23:36:21 crc kubenswrapper[5045]: I1125 23:36:21.127244 5045 scope.go:117] "RemoveContainer" containerID="c50547b81a752b7c84224f6649c74b625d780b057b97e1c8821e32a3d8a6af1a" Nov 25 23:36:30 crc kubenswrapper[5045]: I1125 23:36:30.540872 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:36:30 crc kubenswrapper[5045]: I1125 23:36:30.541867 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" 
podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:36:32 crc kubenswrapper[5045]: I1125 23:36:32.972574 5045 generic.go:334] "Generic (PLEG): container finished" podID="c9631995-c169-41d0-90cb-9d1566919f23" containerID="b0df6116e4f25a21bd6b68490f5bcd5f0f75debb51fd29570358488a50f5b609" exitCode=0 Nov 25 23:36:32 crc kubenswrapper[5045]: I1125 23:36:32.972733 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" event={"ID":"c9631995-c169-41d0-90cb-9d1566919f23","Type":"ContainerDied","Data":"b0df6116e4f25a21bd6b68490f5bcd5f0f75debb51fd29570358488a50f5b609"} Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.543212 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.674087 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2td5\" (UniqueName: \"kubernetes.io/projected/c9631995-c169-41d0-90cb-9d1566919f23-kube-api-access-n2td5\") pod \"c9631995-c169-41d0-90cb-9d1566919f23\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.674205 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-inventory\") pod \"c9631995-c169-41d0-90cb-9d1566919f23\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.674258 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ssh-key\") pod \"c9631995-c169-41d0-90cb-9d1566919f23\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.674384 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ceph\") pod \"c9631995-c169-41d0-90cb-9d1566919f23\" (UID: \"c9631995-c169-41d0-90cb-9d1566919f23\") " Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.680237 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9631995-c169-41d0-90cb-9d1566919f23-kube-api-access-n2td5" (OuterVolumeSpecName: "kube-api-access-n2td5") pod "c9631995-c169-41d0-90cb-9d1566919f23" (UID: "c9631995-c169-41d0-90cb-9d1566919f23"). InnerVolumeSpecName "kube-api-access-n2td5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.685316 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ceph" (OuterVolumeSpecName: "ceph") pod "c9631995-c169-41d0-90cb-9d1566919f23" (UID: "c9631995-c169-41d0-90cb-9d1566919f23"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.714171 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-inventory" (OuterVolumeSpecName: "inventory") pod "c9631995-c169-41d0-90cb-9d1566919f23" (UID: "c9631995-c169-41d0-90cb-9d1566919f23"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.719804 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c9631995-c169-41d0-90cb-9d1566919f23" (UID: "c9631995-c169-41d0-90cb-9d1566919f23"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.776768 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2td5\" (UniqueName: \"kubernetes.io/projected/c9631995-c169-41d0-90cb-9d1566919f23-kube-api-access-n2td5\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.776817 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.776837 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.776857 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c9631995-c169-41d0-90cb-9d1566919f23-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.996820 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" event={"ID":"c9631995-c169-41d0-90cb-9d1566919f23","Type":"ContainerDied","Data":"4bae7506c90fcd846c57654e3fc3ae11caa3265856bd0747ff14f4eaedd63dca"} Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.997224 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4bae7506c90fcd846c57654e3fc3ae11caa3265856bd0747ff14f4eaedd63dca" Nov 25 23:36:34 crc kubenswrapper[5045]: I1125 23:36:34.997014 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.124565 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz"] Nov 25 23:36:35 crc kubenswrapper[5045]: E1125 23:36:35.125029 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9631995-c169-41d0-90cb-9d1566919f23" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.125052 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9631995-c169-41d0-90cb-9d1566919f23" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.125269 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9631995-c169-41d0-90cb-9d1566919f23" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.126067 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.132815 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.132885 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.133077 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.133180 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.133211 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.169967 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz"] Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.291004 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpz9p\" (UniqueName: \"kubernetes.io/projected/8e685995-6390-45d1-948f-9aa20cef1060-kube-api-access-lpz9p\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.291102 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.291175 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.291274 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.392771 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.392934 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpz9p\" (UniqueName: \"kubernetes.io/projected/8e685995-6390-45d1-948f-9aa20cef1060-kube-api-access-lpz9p\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.393009 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.393112 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.399363 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.399776 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.400661 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: 
\"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.415926 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpz9p\" (UniqueName: \"kubernetes.io/projected/8e685995-6390-45d1-948f-9aa20cef1060-kube-api-access-lpz9p\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:35 crc kubenswrapper[5045]: I1125 23:36:35.481390 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:36 crc kubenswrapper[5045]: I1125 23:36:36.142416 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz"] Nov 25 23:36:37 crc kubenswrapper[5045]: I1125 23:36:37.016572 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" event={"ID":"8e685995-6390-45d1-948f-9aa20cef1060","Type":"ContainerStarted","Data":"ec9a079c91616d518fb7ffb2c35448a4044ef4c42aed773ee650d6f9ac3948bf"} Nov 25 23:36:37 crc kubenswrapper[5045]: I1125 23:36:37.016923 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" event={"ID":"8e685995-6390-45d1-948f-9aa20cef1060","Type":"ContainerStarted","Data":"d1e138d6dbaf2d0dd80e2bb8562de01a55933d952461d4792a3dc1819cb01e30"} Nov 25 23:36:37 crc kubenswrapper[5045]: I1125 23:36:37.042582 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" podStartSLOduration=1.613537597 podStartE2EDuration="2.042562755s" podCreationTimestamp="2025-11-25 23:36:35 +0000 UTC" firstStartedPulling="2025-11-25 23:36:36.145291901 +0000 UTC m=+2252.502951053" lastFinishedPulling="2025-11-25 23:36:36.574317059 +0000 UTC m=+2252.931976211" observedRunningTime="2025-11-25 23:36:37.036517365 +0000 UTC m=+2253.394176547" watchObservedRunningTime="2025-11-25 23:36:37.042562755 +0000 UTC m=+2253.400221887" Nov 25 23:36:42 crc kubenswrapper[5045]: I1125 23:36:42.112179 5045 generic.go:334] "Generic (PLEG): container finished" podID="8e685995-6390-45d1-948f-9aa20cef1060" containerID="ec9a079c91616d518fb7ffb2c35448a4044ef4c42aed773ee650d6f9ac3948bf" exitCode=0 Nov 25 23:36:42 crc kubenswrapper[5045]: I1125 23:36:42.112316 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" event={"ID":"8e685995-6390-45d1-948f-9aa20cef1060","Type":"ContainerDied","Data":"ec9a079c91616d518fb7ffb2c35448a4044ef4c42aed773ee650d6f9ac3948bf"} Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.455204 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.536596 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ssh-key\") pod \"8e685995-6390-45d1-948f-9aa20cef1060\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.536885 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-inventory\") pod \"8e685995-6390-45d1-948f-9aa20cef1060\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.537002 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpz9p\" (UniqueName: \"kubernetes.io/projected/8e685995-6390-45d1-948f-9aa20cef1060-kube-api-access-lpz9p\") pod \"8e685995-6390-45d1-948f-9aa20cef1060\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.537133 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ceph\") pod \"8e685995-6390-45d1-948f-9aa20cef1060\" (UID: \"8e685995-6390-45d1-948f-9aa20cef1060\") " Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.543180 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e685995-6390-45d1-948f-9aa20cef1060-kube-api-access-lpz9p" (OuterVolumeSpecName: "kube-api-access-lpz9p") pod "8e685995-6390-45d1-948f-9aa20cef1060" (UID: "8e685995-6390-45d1-948f-9aa20cef1060"). InnerVolumeSpecName "kube-api-access-lpz9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.543437 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ceph" (OuterVolumeSpecName: "ceph") pod "8e685995-6390-45d1-948f-9aa20cef1060" (UID: "8e685995-6390-45d1-948f-9aa20cef1060"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.565529 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8e685995-6390-45d1-948f-9aa20cef1060" (UID: "8e685995-6390-45d1-948f-9aa20cef1060"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.574647 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-inventory" (OuterVolumeSpecName: "inventory") pod "8e685995-6390-45d1-948f-9aa20cef1060" (UID: "8e685995-6390-45d1-948f-9aa20cef1060"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.639957 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.639995 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.640012 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpz9p\" (UniqueName: \"kubernetes.io/projected/8e685995-6390-45d1-948f-9aa20cef1060-kube-api-access-lpz9p\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:43 crc kubenswrapper[5045]: I1125 23:36:43.640026 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e685995-6390-45d1-948f-9aa20cef1060-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.130688 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" event={"ID":"8e685995-6390-45d1-948f-9aa20cef1060","Type":"ContainerDied","Data":"d1e138d6dbaf2d0dd80e2bb8562de01a55933d952461d4792a3dc1819cb01e30"} Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.131085 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d1e138d6dbaf2d0dd80e2bb8562de01a55933d952461d4792a3dc1819cb01e30" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.130827 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.324199 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw"] Nov 25 23:36:44 crc kubenswrapper[5045]: E1125 23:36:44.324641 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e685995-6390-45d1-948f-9aa20cef1060" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.324663 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e685995-6390-45d1-948f-9aa20cef1060" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.324883 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e685995-6390-45d1-948f-9aa20cef1060" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.325456 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.360068 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.360138 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.360219 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.360472 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.360773 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.360855 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df44n\" (UniqueName: \"kubernetes.io/projected/f0795a52-0ba0-497f-a55c-8888a54c0fa8-kube-api-access-df44n\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.360928 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.360985 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.361024 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.389977 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw"] Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.462735 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df44n\" (UniqueName: \"kubernetes.io/projected/f0795a52-0ba0-497f-a55c-8888a54c0fa8-kube-api-access-df44n\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.462811 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" 
(UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.462882 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.462940 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.466838 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.467044 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.467407 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.478004 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df44n\" (UniqueName: \"kubernetes.io/projected/f0795a52-0ba0-497f-a55c-8888a54c0fa8-kube-api-access-df44n\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lzmxw\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:44 crc kubenswrapper[5045]: I1125 23:36:44.690227 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:36:45 crc kubenswrapper[5045]: I1125 23:36:45.311062 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw"] Nov 25 23:36:46 crc kubenswrapper[5045]: I1125 23:36:46.148104 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" event={"ID":"f0795a52-0ba0-497f-a55c-8888a54c0fa8","Type":"ContainerStarted","Data":"9722188c326cdb07631a756cb4f2d635ae4525c3f32ad0d9d517f845af52f457"} Nov 25 23:36:47 crc kubenswrapper[5045]: I1125 23:36:47.165284 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" event={"ID":"f0795a52-0ba0-497f-a55c-8888a54c0fa8","Type":"ContainerStarted","Data":"05dadb43547e41b1f27e4731f567956a7aea559f28e2825188b8e8854ef05ae9"} Nov 25 23:36:47 crc kubenswrapper[5045]: I1125 23:36:47.187412 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" podStartSLOduration=2.574660638 podStartE2EDuration="3.18739337s" podCreationTimestamp="2025-11-25 23:36:44 +0000 UTC" firstStartedPulling="2025-11-25 23:36:45.313767609 +0000 UTC m=+2261.671426721" lastFinishedPulling="2025-11-25 23:36:45.926500341 +0000 UTC m=+2262.284159453" observedRunningTime="2025-11-25 23:36:47.184613261 +0000 UTC m=+2263.542272413" watchObservedRunningTime="2025-11-25 23:36:47.18739337 +0000 UTC m=+2263.545052492" Nov 25 23:37:00 crc kubenswrapper[5045]: I1125 23:37:00.541275 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:37:00 crc kubenswrapper[5045]: I1125 23:37:00.542130 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:37:00 crc kubenswrapper[5045]: I1125 23:37:00.542214 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:37:00 crc kubenswrapper[5045]: I1125 23:37:00.543699 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:37:00 crc kubenswrapper[5045]: I1125 23:37:00.543890 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" gracePeriod=600 Nov 25 23:37:00 crc kubenswrapper[5045]: E1125 23:37:00.701156 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:37:01 crc kubenswrapper[5045]: I1125 23:37:01.319277 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" exitCode=0 Nov 25 23:37:01 crc kubenswrapper[5045]: I1125 23:37:01.319355 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5"} Nov 25 23:37:01 crc kubenswrapper[5045]: I1125 23:37:01.319415 5045 scope.go:117] "RemoveContainer" containerID="a33f156d04cc1e10a7f2c15e4d00ab8b26b2ed9c77c496c8385c52559bc0d4b3" Nov 25 23:37:01 crc kubenswrapper[5045]: I1125 23:37:01.320251 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:37:01 crc kubenswrapper[5045]: E1125 23:37:01.320909 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:37:16 crc kubenswrapper[5045]: I1125 23:37:16.397683 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:37:16 crc kubenswrapper[5045]: E1125 23:37:16.398958 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:37:25 crc kubenswrapper[5045]: I1125 23:37:25.588984 5045 generic.go:334] "Generic (PLEG): container finished" podID="f0795a52-0ba0-497f-a55c-8888a54c0fa8" containerID="05dadb43547e41b1f27e4731f567956a7aea559f28e2825188b8e8854ef05ae9" exitCode=0 Nov 25 23:37:25 crc kubenswrapper[5045]: I1125 23:37:25.589071 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" event={"ID":"f0795a52-0ba0-497f-a55c-8888a54c0fa8","Type":"ContainerDied","Data":"05dadb43547e41b1f27e4731f567956a7aea559f28e2825188b8e8854ef05ae9"} Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.175980 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.324109 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-inventory\") pod \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.324404 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-df44n\" (UniqueName: \"kubernetes.io/projected/f0795a52-0ba0-497f-a55c-8888a54c0fa8-kube-api-access-df44n\") pod \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.324553 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ssh-key\") pod \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.324655 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ceph\") pod \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\" (UID: \"f0795a52-0ba0-497f-a55c-8888a54c0fa8\") " Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.335401 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ceph" (OuterVolumeSpecName: "ceph") pod "f0795a52-0ba0-497f-a55c-8888a54c0fa8" (UID: "f0795a52-0ba0-497f-a55c-8888a54c0fa8"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.337357 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0795a52-0ba0-497f-a55c-8888a54c0fa8-kube-api-access-df44n" (OuterVolumeSpecName: "kube-api-access-df44n") pod "f0795a52-0ba0-497f-a55c-8888a54c0fa8" (UID: "f0795a52-0ba0-497f-a55c-8888a54c0fa8"). InnerVolumeSpecName "kube-api-access-df44n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.363501 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-inventory" (OuterVolumeSpecName: "inventory") pod "f0795a52-0ba0-497f-a55c-8888a54c0fa8" (UID: "f0795a52-0ba0-497f-a55c-8888a54c0fa8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.364319 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f0795a52-0ba0-497f-a55c-8888a54c0fa8" (UID: "f0795a52-0ba0-497f-a55c-8888a54c0fa8"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.428169 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.428207 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-df44n\" (UniqueName: \"kubernetes.io/projected/f0795a52-0ba0-497f-a55c-8888a54c0fa8-kube-api-access-df44n\") on node \"crc\" DevicePath \"\"" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.428222 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.428230 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0795a52-0ba0-497f-a55c-8888a54c0fa8-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.611026 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" event={"ID":"f0795a52-0ba0-497f-a55c-8888a54c0fa8","Type":"ContainerDied","Data":"9722188c326cdb07631a756cb4f2d635ae4525c3f32ad0d9d517f845af52f457"} Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.611100 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9722188c326cdb07631a756cb4f2d635ae4525c3f32ad0d9d517f845af52f457" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.611140 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lzmxw" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.767203 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5"] Nov 25 23:37:27 crc kubenswrapper[5045]: E1125 23:37:27.767805 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0795a52-0ba0-497f-a55c-8888a54c0fa8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.767832 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0795a52-0ba0-497f-a55c-8888a54c0fa8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.768082 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0795a52-0ba0-497f-a55c-8888a54c0fa8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.768929 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.778195 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.778385 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.778427 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.778530 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.778561 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.786429 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5"] Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.838673 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.838762 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67zl7\" (UniqueName: \"kubernetes.io/projected/8e0f7b11-159a-4941-9abe-03adde83a57c-kube-api-access-67zl7\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.839060 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.839413 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.940737 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67zl7\" (UniqueName: \"kubernetes.io/projected/8e0f7b11-159a-4941-9abe-03adde83a57c-kube-api-access-67zl7\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.940842 5045 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.940906 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.940957 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.946570 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.947117 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.948640 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:27 crc kubenswrapper[5045]: I1125 23:37:27.959851 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67zl7\" (UniqueName: \"kubernetes.io/projected/8e0f7b11-159a-4941-9abe-03adde83a57c-kube-api-access-67zl7\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:28 crc kubenswrapper[5045]: I1125 23:37:28.097419 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:28 crc kubenswrapper[5045]: I1125 23:37:28.397508 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:37:28 crc kubenswrapper[5045]: E1125 23:37:28.398328 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:37:28 crc kubenswrapper[5045]: I1125 23:37:28.676082 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5"] Nov 25 23:37:29 crc kubenswrapper[5045]: I1125 23:37:29.638038 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" event={"ID":"8e0f7b11-159a-4941-9abe-03adde83a57c","Type":"ContainerStarted","Data":"c0a8101a0b882216a2c7cde25463a9f54a831efb8eadb4a26e20055c4b76b755"} Nov 25 23:37:29 crc kubenswrapper[5045]: I1125 23:37:29.638553 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" event={"ID":"8e0f7b11-159a-4941-9abe-03adde83a57c","Type":"ContainerStarted","Data":"451956f2ba066f1ff4c5cc737fa8af0887c9743ed4b8f8e12b9efbb72279b956"} Nov 25 23:37:29 crc kubenswrapper[5045]: I1125 23:37:29.668539 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" podStartSLOduration=2.225626988 podStartE2EDuration="2.668506209s" podCreationTimestamp="2025-11-25 23:37:27 +0000 UTC" firstStartedPulling="2025-11-25 23:37:28.680194654 +0000 UTC m=+2305.037853766" lastFinishedPulling="2025-11-25 23:37:29.123073835 +0000 UTC m=+2305.480732987" observedRunningTime="2025-11-25 23:37:29.667354936 +0000 UTC m=+2306.025014048" watchObservedRunningTime="2025-11-25 23:37:29.668506209 +0000 UTC m=+2306.026165331" Nov 25 23:37:33 crc kubenswrapper[5045]: I1125 23:37:33.678272 5045 generic.go:334] "Generic (PLEG): container finished" podID="8e0f7b11-159a-4941-9abe-03adde83a57c" containerID="c0a8101a0b882216a2c7cde25463a9f54a831efb8eadb4a26e20055c4b76b755" exitCode=0 Nov 25 23:37:33 crc kubenswrapper[5045]: I1125 23:37:33.678349 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" event={"ID":"8e0f7b11-159a-4941-9abe-03adde83a57c","Type":"ContainerDied","Data":"c0a8101a0b882216a2c7cde25463a9f54a831efb8eadb4a26e20055c4b76b755"} Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.178348 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.314291 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-inventory\") pod \"8e0f7b11-159a-4941-9abe-03adde83a57c\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.314417 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67zl7\" (UniqueName: \"kubernetes.io/projected/8e0f7b11-159a-4941-9abe-03adde83a57c-kube-api-access-67zl7\") pod \"8e0f7b11-159a-4941-9abe-03adde83a57c\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.314442 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ceph\") pod \"8e0f7b11-159a-4941-9abe-03adde83a57c\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.314546 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ssh-key\") pod \"8e0f7b11-159a-4941-9abe-03adde83a57c\" (UID: \"8e0f7b11-159a-4941-9abe-03adde83a57c\") " Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.322158 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e0f7b11-159a-4941-9abe-03adde83a57c-kube-api-access-67zl7" (OuterVolumeSpecName: "kube-api-access-67zl7") pod "8e0f7b11-159a-4941-9abe-03adde83a57c" (UID: "8e0f7b11-159a-4941-9abe-03adde83a57c"). InnerVolumeSpecName "kube-api-access-67zl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.323860 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ceph" (OuterVolumeSpecName: "ceph") pod "8e0f7b11-159a-4941-9abe-03adde83a57c" (UID: "8e0f7b11-159a-4941-9abe-03adde83a57c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.347888 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-inventory" (OuterVolumeSpecName: "inventory") pod "8e0f7b11-159a-4941-9abe-03adde83a57c" (UID: "8e0f7b11-159a-4941-9abe-03adde83a57c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.358425 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8e0f7b11-159a-4941-9abe-03adde83a57c" (UID: "8e0f7b11-159a-4941-9abe-03adde83a57c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.416571 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67zl7\" (UniqueName: \"kubernetes.io/projected/8e0f7b11-159a-4941-9abe-03adde83a57c-kube-api-access-67zl7\") on node \"crc\" DevicePath \"\"" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.416612 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.416624 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.416635 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e0f7b11-159a-4941-9abe-03adde83a57c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.699943 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" event={"ID":"8e0f7b11-159a-4941-9abe-03adde83a57c","Type":"ContainerDied","Data":"451956f2ba066f1ff4c5cc737fa8af0887c9743ed4b8f8e12b9efbb72279b956"} Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.700017 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="451956f2ba066f1ff4c5cc737fa8af0887c9743ed4b8f8e12b9efbb72279b956" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.700654 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.804038 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h"] Nov 25 23:37:35 crc kubenswrapper[5045]: E1125 23:37:35.804525 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e0f7b11-159a-4941-9abe-03adde83a57c" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.804550 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e0f7b11-159a-4941-9abe-03adde83a57c" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.804810 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e0f7b11-159a-4941-9abe-03adde83a57c" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.805607 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.808430 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.808595 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.808918 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.808990 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.809881 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.817120 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h"] Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.928676 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4nxr\" (UniqueName: \"kubernetes.io/projected/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-kube-api-access-q4nxr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.928890 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.929056 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:35 crc kubenswrapper[5045]: I1125 23:37:35.929126 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.030474 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4nxr\" (UniqueName: \"kubernetes.io/projected/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-kube-api-access-q4nxr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.030569 5045 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.030672 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.030767 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.034807 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.036022 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.036383 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.046086 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4nxr\" (UniqueName: \"kubernetes.io/projected/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-kube-api-access-q4nxr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.128908 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:37:36 crc kubenswrapper[5045]: I1125 23:37:36.727273 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h"] Nov 25 23:37:36 crc kubenswrapper[5045]: W1125 23:37:36.735289 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ffaaa5a_fd54_4e1a_8591_6991152aa8de.slice/crio-f4820b1960c882ea15604797dca2bee6101091a4a51dd7c8b1d08822a561635d WatchSource:0}: Error finding container f4820b1960c882ea15604797dca2bee6101091a4a51dd7c8b1d08822a561635d: Status 404 returned error can't find the container with id f4820b1960c882ea15604797dca2bee6101091a4a51dd7c8b1d08822a561635d Nov 25 23:37:37 crc kubenswrapper[5045]: I1125 23:37:37.720171 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" event={"ID":"6ffaaa5a-fd54-4e1a-8591-6991152aa8de","Type":"ContainerStarted","Data":"3bbf504c27db83f264f729c04c09283973657d798b12baab425068466e4594cc"} Nov 25 23:37:37 crc kubenswrapper[5045]: I1125 23:37:37.720435 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" event={"ID":"6ffaaa5a-fd54-4e1a-8591-6991152aa8de","Type":"ContainerStarted","Data":"f4820b1960c882ea15604797dca2bee6101091a4a51dd7c8b1d08822a561635d"} Nov 25 23:37:37 crc kubenswrapper[5045]: I1125 23:37:37.745154 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" podStartSLOduration=2.270814146 podStartE2EDuration="2.74513179s" podCreationTimestamp="2025-11-25 23:37:35 +0000 UTC" firstStartedPulling="2025-11-25 23:37:36.738826783 +0000 UTC m=+2313.096485935" lastFinishedPulling="2025-11-25 23:37:37.213144437 +0000 UTC m=+2313.570803579" observedRunningTime="2025-11-25 23:37:37.738500781 +0000 UTC m=+2314.096159913" watchObservedRunningTime="2025-11-25 23:37:37.74513179 +0000 UTC m=+2314.102790922" Nov 25 23:37:43 crc kubenswrapper[5045]: I1125 23:37:43.396215 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:37:43 crc kubenswrapper[5045]: E1125 23:37:43.397140 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:37:57 crc kubenswrapper[5045]: I1125 23:37:57.397098 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:37:57 crc kubenswrapper[5045]: E1125 23:37:57.398124 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:38:10 crc kubenswrapper[5045]: I1125 
23:38:10.398305 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:38:10 crc kubenswrapper[5045]: E1125 23:38:10.399445 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:38:23 crc kubenswrapper[5045]: I1125 23:38:23.397318 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:38:23 crc kubenswrapper[5045]: E1125 23:38:23.398537 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:38:28 crc kubenswrapper[5045]: I1125 23:38:28.346622 5045 generic.go:334] "Generic (PLEG): container finished" podID="6ffaaa5a-fd54-4e1a-8591-6991152aa8de" containerID="3bbf504c27db83f264f729c04c09283973657d798b12baab425068466e4594cc" exitCode=0 Nov 25 23:38:28 crc kubenswrapper[5045]: I1125 23:38:28.346739 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" event={"ID":"6ffaaa5a-fd54-4e1a-8591-6991152aa8de","Type":"ContainerDied","Data":"3bbf504c27db83f264f729c04c09283973657d798b12baab425068466e4594cc"} Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.764912 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.931036 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ceph\") pod \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.931091 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ssh-key\") pod \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.931152 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4nxr\" (UniqueName: \"kubernetes.io/projected/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-kube-api-access-q4nxr\") pod \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.931245 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-inventory\") pod \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\" (UID: \"6ffaaa5a-fd54-4e1a-8591-6991152aa8de\") " Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.970186 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ceph" (OuterVolumeSpecName: "ceph") pod "6ffaaa5a-fd54-4e1a-8591-6991152aa8de" (UID: "6ffaaa5a-fd54-4e1a-8591-6991152aa8de"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.970865 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-kube-api-access-q4nxr" (OuterVolumeSpecName: "kube-api-access-q4nxr") pod "6ffaaa5a-fd54-4e1a-8591-6991152aa8de" (UID: "6ffaaa5a-fd54-4e1a-8591-6991152aa8de"). InnerVolumeSpecName "kube-api-access-q4nxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.973558 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6ffaaa5a-fd54-4e1a-8591-6991152aa8de" (UID: "6ffaaa5a-fd54-4e1a-8591-6991152aa8de"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:29 crc kubenswrapper[5045]: I1125 23:38:29.973870 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-inventory" (OuterVolumeSpecName: "inventory") pod "6ffaaa5a-fd54-4e1a-8591-6991152aa8de" (UID: "6ffaaa5a-fd54-4e1a-8591-6991152aa8de"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.033006 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.033040 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4nxr\" (UniqueName: \"kubernetes.io/projected/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-kube-api-access-q4nxr\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.033052 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.033060 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ffaaa5a-fd54-4e1a-8591-6991152aa8de-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.379514 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" event={"ID":"6ffaaa5a-fd54-4e1a-8591-6991152aa8de","Type":"ContainerDied","Data":"f4820b1960c882ea15604797dca2bee6101091a4a51dd7c8b1d08822a561635d"} Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.379923 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4820b1960c882ea15604797dca2bee6101091a4a51dd7c8b1d08822a561635d" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.380343 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.501397 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-cpmnq"] Nov 25 23:38:30 crc kubenswrapper[5045]: E1125 23:38:30.502023 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ffaaa5a-fd54-4e1a-8591-6991152aa8de" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.502053 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ffaaa5a-fd54-4e1a-8591-6991152aa8de" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.502359 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ffaaa5a-fd54-4e1a-8591-6991152aa8de" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.503325 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.506509 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.506974 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.507227 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.507466 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.507670 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.556626 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-cpmnq"] Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.649181 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dx584\" (UniqueName: \"kubernetes.io/projected/7747bcca-12c7-4cf7-82e3-2a554f853dce-kube-api-access-dx584\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.649284 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ceph\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.649586 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.649689 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.752805 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dx584\" (UniqueName: \"kubernetes.io/projected/7747bcca-12c7-4cf7-82e3-2a554f853dce-kube-api-access-dx584\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.752941 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ceph\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: 
\"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.753098 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.753555 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.758277 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.759818 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ceph\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.760453 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.786440 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dx584\" (UniqueName: \"kubernetes.io/projected/7747bcca-12c7-4cf7-82e3-2a554f853dce-kube-api-access-dx584\") pod \"ssh-known-hosts-edpm-deployment-cpmnq\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:30 crc kubenswrapper[5045]: I1125 23:38:30.857871 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:31 crc kubenswrapper[5045]: W1125 23:38:31.542187 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7747bcca_12c7_4cf7_82e3_2a554f853dce.slice/crio-caa8b5ba0a44267d063f00a8908a3bbbe5dbff6793daa0c8d45b6646508d61dd WatchSource:0}: Error finding container caa8b5ba0a44267d063f00a8908a3bbbe5dbff6793daa0c8d45b6646508d61dd: Status 404 returned error can't find the container with id caa8b5ba0a44267d063f00a8908a3bbbe5dbff6793daa0c8d45b6646508d61dd Nov 25 23:38:31 crc kubenswrapper[5045]: I1125 23:38:31.552529 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-cpmnq"] Nov 25 23:38:32 crc kubenswrapper[5045]: I1125 23:38:32.413263 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" event={"ID":"7747bcca-12c7-4cf7-82e3-2a554f853dce","Type":"ContainerStarted","Data":"9576a0f5e2af4f6a8cf7d3cc4627c197a2c8b722951471147bf06c41a09f9c3d"} Nov 25 23:38:32 crc kubenswrapper[5045]: I1125 23:38:32.413526 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" event={"ID":"7747bcca-12c7-4cf7-82e3-2a554f853dce","Type":"ContainerStarted","Data":"caa8b5ba0a44267d063f00a8908a3bbbe5dbff6793daa0c8d45b6646508d61dd"} Nov 25 23:38:32 crc kubenswrapper[5045]: I1125 23:38:32.430293 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" podStartSLOduration=1.955175557 podStartE2EDuration="2.430278291s" podCreationTimestamp="2025-11-25 23:38:30 +0000 UTC" firstStartedPulling="2025-11-25 23:38:31.551417826 +0000 UTC m=+2367.909076938" lastFinishedPulling="2025-11-25 23:38:32.02652052 +0000 UTC m=+2368.384179672" observedRunningTime="2025-11-25 23:38:32.429566201 +0000 UTC m=+2368.787225323" watchObservedRunningTime="2025-11-25 23:38:32.430278291 +0000 UTC m=+2368.787937413" Nov 25 23:38:34 crc kubenswrapper[5045]: I1125 23:38:34.416105 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:38:34 crc kubenswrapper[5045]: E1125 23:38:34.417579 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:38:43 crc kubenswrapper[5045]: I1125 23:38:43.535989 5045 generic.go:334] "Generic (PLEG): container finished" podID="7747bcca-12c7-4cf7-82e3-2a554f853dce" containerID="9576a0f5e2af4f6a8cf7d3cc4627c197a2c8b722951471147bf06c41a09f9c3d" exitCode=0 Nov 25 23:38:43 crc kubenswrapper[5045]: I1125 23:38:43.536090 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" event={"ID":"7747bcca-12c7-4cf7-82e3-2a554f853dce","Type":"ContainerDied","Data":"9576a0f5e2af4f6a8cf7d3cc4627c197a2c8b722951471147bf06c41a09f9c3d"} Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.027500 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.049451 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ceph\") pod \"7747bcca-12c7-4cf7-82e3-2a554f853dce\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.049510 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dx584\" (UniqueName: \"kubernetes.io/projected/7747bcca-12c7-4cf7-82e3-2a554f853dce-kube-api-access-dx584\") pod \"7747bcca-12c7-4cf7-82e3-2a554f853dce\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.049593 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ssh-key-openstack-edpm-ipam\") pod \"7747bcca-12c7-4cf7-82e3-2a554f853dce\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.049703 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-inventory-0\") pod \"7747bcca-12c7-4cf7-82e3-2a554f853dce\" (UID: \"7747bcca-12c7-4cf7-82e3-2a554f853dce\") " Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.059259 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7747bcca-12c7-4cf7-82e3-2a554f853dce-kube-api-access-dx584" (OuterVolumeSpecName: "kube-api-access-dx584") pod "7747bcca-12c7-4cf7-82e3-2a554f853dce" (UID: "7747bcca-12c7-4cf7-82e3-2a554f853dce"). InnerVolumeSpecName "kube-api-access-dx584". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.067159 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ceph" (OuterVolumeSpecName: "ceph") pod "7747bcca-12c7-4cf7-82e3-2a554f853dce" (UID: "7747bcca-12c7-4cf7-82e3-2a554f853dce"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.080800 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7747bcca-12c7-4cf7-82e3-2a554f853dce" (UID: "7747bcca-12c7-4cf7-82e3-2a554f853dce"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.112582 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "7747bcca-12c7-4cf7-82e3-2a554f853dce" (UID: "7747bcca-12c7-4cf7-82e3-2a554f853dce"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.154063 5045 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.154104 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.154121 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dx584\" (UniqueName: \"kubernetes.io/projected/7747bcca-12c7-4cf7-82e3-2a554f853dce-kube-api-access-dx584\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.154139 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7747bcca-12c7-4cf7-82e3-2a554f853dce-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.561082 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" event={"ID":"7747bcca-12c7-4cf7-82e3-2a554f853dce","Type":"ContainerDied","Data":"caa8b5ba0a44267d063f00a8908a3bbbe5dbff6793daa0c8d45b6646508d61dd"} Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.561142 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="caa8b5ba0a44267d063f00a8908a3bbbe5dbff6793daa0c8d45b6646508d61dd" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.561253 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-cpmnq" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.661077 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9"] Nov 25 23:38:45 crc kubenswrapper[5045]: E1125 23:38:45.661622 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7747bcca-12c7-4cf7-82e3-2a554f853dce" containerName="ssh-known-hosts-edpm-deployment" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.661651 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7747bcca-12c7-4cf7-82e3-2a554f853dce" containerName="ssh-known-hosts-edpm-deployment" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.662012 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="7747bcca-12c7-4cf7-82e3-2a554f853dce" containerName="ssh-known-hosts-edpm-deployment" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.663120 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.666611 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.666668 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.668130 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.668499 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.675316 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.685471 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9"] Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.765112 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp5k8\" (UniqueName: \"kubernetes.io/projected/96f74290-34d3-41c3-8088-09e598444e2e-kube-api-access-pp5k8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.765188 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.765261 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.765361 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.867047 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.867227 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.867322 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp5k8\" (UniqueName: \"kubernetes.io/projected/96f74290-34d3-41c3-8088-09e598444e2e-kube-api-access-pp5k8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.867395 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.871681 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.872243 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.872475 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:45 crc kubenswrapper[5045]: I1125 23:38:45.890312 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp5k8\" (UniqueName: \"kubernetes.io/projected/96f74290-34d3-41c3-8088-09e598444e2e-kube-api-access-pp5k8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fbvf9\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:46 crc kubenswrapper[5045]: I1125 23:38:46.019325 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:46 crc kubenswrapper[5045]: I1125 23:38:46.633237 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9"] Nov 25 23:38:46 crc kubenswrapper[5045]: I1125 23:38:46.644778 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:38:47 crc kubenswrapper[5045]: I1125 23:38:47.590331 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" event={"ID":"96f74290-34d3-41c3-8088-09e598444e2e","Type":"ContainerStarted","Data":"cc8e9feb12e5aa94099c4a40abcf82522d209041b8d9de1dba3917ef23e48e89"} Nov 25 23:38:47 crc kubenswrapper[5045]: I1125 23:38:47.590689 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" event={"ID":"96f74290-34d3-41c3-8088-09e598444e2e","Type":"ContainerStarted","Data":"0083fefaf000da274368ab85946280d3a2ecec75ad4b269e8633367f5f32c1c8"} Nov 25 23:38:47 crc kubenswrapper[5045]: I1125 23:38:47.620096 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" podStartSLOduration=2.1775217 podStartE2EDuration="2.62006477s" podCreationTimestamp="2025-11-25 23:38:45 +0000 UTC" firstStartedPulling="2025-11-25 23:38:46.644512959 +0000 UTC m=+2383.002172081" lastFinishedPulling="2025-11-25 23:38:47.087056029 +0000 UTC m=+2383.444715151" observedRunningTime="2025-11-25 23:38:47.617305772 +0000 UTC m=+2383.974964924" watchObservedRunningTime="2025-11-25 23:38:47.62006477 +0000 UTC m=+2383.977723922" Nov 25 23:38:49 crc kubenswrapper[5045]: I1125 23:38:49.397447 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:38:49 crc kubenswrapper[5045]: E1125 23:38:49.398229 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:38:55 crc kubenswrapper[5045]: I1125 23:38:55.695594 5045 generic.go:334] "Generic (PLEG): container finished" podID="96f74290-34d3-41c3-8088-09e598444e2e" containerID="cc8e9feb12e5aa94099c4a40abcf82522d209041b8d9de1dba3917ef23e48e89" exitCode=0 Nov 25 23:38:55 crc kubenswrapper[5045]: I1125 23:38:55.695697 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" event={"ID":"96f74290-34d3-41c3-8088-09e598444e2e","Type":"ContainerDied","Data":"cc8e9feb12e5aa94099c4a40abcf82522d209041b8d9de1dba3917ef23e48e89"} Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.252755 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.321898 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pp5k8\" (UniqueName: \"kubernetes.io/projected/96f74290-34d3-41c3-8088-09e598444e2e-kube-api-access-pp5k8\") pod \"96f74290-34d3-41c3-8088-09e598444e2e\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.322128 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ceph\") pod \"96f74290-34d3-41c3-8088-09e598444e2e\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.322191 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ssh-key\") pod \"96f74290-34d3-41c3-8088-09e598444e2e\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.322301 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-inventory\") pod \"96f74290-34d3-41c3-8088-09e598444e2e\" (UID: \"96f74290-34d3-41c3-8088-09e598444e2e\") " Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.336998 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96f74290-34d3-41c3-8088-09e598444e2e-kube-api-access-pp5k8" (OuterVolumeSpecName: "kube-api-access-pp5k8") pod "96f74290-34d3-41c3-8088-09e598444e2e" (UID: "96f74290-34d3-41c3-8088-09e598444e2e"). InnerVolumeSpecName "kube-api-access-pp5k8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.347901 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ceph" (OuterVolumeSpecName: "ceph") pod "96f74290-34d3-41c3-8088-09e598444e2e" (UID: "96f74290-34d3-41c3-8088-09e598444e2e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.369987 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "96f74290-34d3-41c3-8088-09e598444e2e" (UID: "96f74290-34d3-41c3-8088-09e598444e2e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.378072 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-inventory" (OuterVolumeSpecName: "inventory") pod "96f74290-34d3-41c3-8088-09e598444e2e" (UID: "96f74290-34d3-41c3-8088-09e598444e2e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.427219 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.427268 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.427288 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pp5k8\" (UniqueName: \"kubernetes.io/projected/96f74290-34d3-41c3-8088-09e598444e2e-kube-api-access-pp5k8\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.427312 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/96f74290-34d3-41c3-8088-09e598444e2e-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.723512 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" event={"ID":"96f74290-34d3-41c3-8088-09e598444e2e","Type":"ContainerDied","Data":"0083fefaf000da274368ab85946280d3a2ecec75ad4b269e8633367f5f32c1c8"} Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.723577 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0083fefaf000da274368ab85946280d3a2ecec75ad4b269e8633367f5f32c1c8" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.723581 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fbvf9" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.830442 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc"] Nov 25 23:38:57 crc kubenswrapper[5045]: E1125 23:38:57.831100 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96f74290-34d3-41c3-8088-09e598444e2e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.831132 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="96f74290-34d3-41c3-8088-09e598444e2e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.831444 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="96f74290-34d3-41c3-8088-09e598444e2e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.832450 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.836618 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.837071 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.837288 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.837527 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.837973 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.847151 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc"] Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.936834 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.937404 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.937672 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b49g9\" (UniqueName: \"kubernetes.io/projected/b7c3894b-8400-492c-b4b9-b45ec555cc68-kube-api-access-b49g9\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:57 crc kubenswrapper[5045]: I1125 23:38:57.937892 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.040176 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.040567 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.040774 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b49g9\" (UniqueName: \"kubernetes.io/projected/b7c3894b-8400-492c-b4b9-b45ec555cc68-kube-api-access-b49g9\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.040914 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.044889 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.045398 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.046551 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.065753 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b49g9\" (UniqueName: \"kubernetes.io/projected/b7c3894b-8400-492c-b4b9-b45ec555cc68-kube-api-access-b49g9\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.161326 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:38:58 crc kubenswrapper[5045]: I1125 23:38:58.765798 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc"] Nov 25 23:38:59 crc kubenswrapper[5045]: I1125 23:38:59.751081 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" event={"ID":"b7c3894b-8400-492c-b4b9-b45ec555cc68","Type":"ContainerStarted","Data":"301aebc650be666013b34637966a5fbc1cec9fa35df05d13cd64cf0cea8f1240"} Nov 25 23:38:59 crc kubenswrapper[5045]: I1125 23:38:59.751557 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" event={"ID":"b7c3894b-8400-492c-b4b9-b45ec555cc68","Type":"ContainerStarted","Data":"c9120bd10ed5e22c46503b741a4b0e4b79a4645df0b744e4732589f98f36e1f5"} Nov 25 23:38:59 crc kubenswrapper[5045]: I1125 23:38:59.776044 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" podStartSLOduration=2.358563618 podStartE2EDuration="2.776020964s" podCreationTimestamp="2025-11-25 23:38:57 +0000 UTC" firstStartedPulling="2025-11-25 23:38:58.777401798 +0000 UTC m=+2395.135060930" lastFinishedPulling="2025-11-25 23:38:59.194859134 +0000 UTC m=+2395.552518276" observedRunningTime="2025-11-25 23:38:59.769966726 +0000 UTC m=+2396.127625868" watchObservedRunningTime="2025-11-25 23:38:59.776020964 +0000 UTC m=+2396.133680106" Nov 25 23:39:00 crc kubenswrapper[5045]: I1125 23:39:00.397658 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:39:00 crc kubenswrapper[5045]: E1125 23:39:00.398293 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:39:09 crc kubenswrapper[5045]: I1125 23:39:09.875284 5045 generic.go:334] "Generic (PLEG): container finished" podID="b7c3894b-8400-492c-b4b9-b45ec555cc68" containerID="301aebc650be666013b34637966a5fbc1cec9fa35df05d13cd64cf0cea8f1240" exitCode=0 Nov 25 23:39:09 crc kubenswrapper[5045]: I1125 23:39:09.875375 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" event={"ID":"b7c3894b-8400-492c-b4b9-b45ec555cc68","Type":"ContainerDied","Data":"301aebc650be666013b34637966a5fbc1cec9fa35df05d13cd64cf0cea8f1240"} Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.374510 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.401273 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:39:11 crc kubenswrapper[5045]: E1125 23:39:11.401829 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.441813 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ceph\") pod \"b7c3894b-8400-492c-b4b9-b45ec555cc68\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.441989 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-inventory\") pod \"b7c3894b-8400-492c-b4b9-b45ec555cc68\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.442050 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b49g9\" (UniqueName: \"kubernetes.io/projected/b7c3894b-8400-492c-b4b9-b45ec555cc68-kube-api-access-b49g9\") pod \"b7c3894b-8400-492c-b4b9-b45ec555cc68\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.442150 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ssh-key\") pod \"b7c3894b-8400-492c-b4b9-b45ec555cc68\" (UID: \"b7c3894b-8400-492c-b4b9-b45ec555cc68\") " Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.452203 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7c3894b-8400-492c-b4b9-b45ec555cc68-kube-api-access-b49g9" (OuterVolumeSpecName: "kube-api-access-b49g9") pod "b7c3894b-8400-492c-b4b9-b45ec555cc68" (UID: "b7c3894b-8400-492c-b4b9-b45ec555cc68"). InnerVolumeSpecName "kube-api-access-b49g9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.456003 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ceph" (OuterVolumeSpecName: "ceph") pod "b7c3894b-8400-492c-b4b9-b45ec555cc68" (UID: "b7c3894b-8400-492c-b4b9-b45ec555cc68"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.494596 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-inventory" (OuterVolumeSpecName: "inventory") pod "b7c3894b-8400-492c-b4b9-b45ec555cc68" (UID: "b7c3894b-8400-492c-b4b9-b45ec555cc68"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.495410 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b7c3894b-8400-492c-b4b9-b45ec555cc68" (UID: "b7c3894b-8400-492c-b4b9-b45ec555cc68"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.545258 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.545317 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b49g9\" (UniqueName: \"kubernetes.io/projected/b7c3894b-8400-492c-b4b9-b45ec555cc68-kube-api-access-b49g9\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.545341 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.545404 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b7c3894b-8400-492c-b4b9-b45ec555cc68-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.899049 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" event={"ID":"b7c3894b-8400-492c-b4b9-b45ec555cc68","Type":"ContainerDied","Data":"c9120bd10ed5e22c46503b741a4b0e4b79a4645df0b744e4732589f98f36e1f5"} Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.899107 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9120bd10ed5e22c46503b741a4b0e4b79a4645df0b744e4732589f98f36e1f5" Nov 25 23:39:11 crc kubenswrapper[5045]: I1125 23:39:11.899147 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.017959 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng"] Nov 25 23:39:12 crc kubenswrapper[5045]: E1125 23:39:12.018510 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7c3894b-8400-492c-b4b9-b45ec555cc68" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.018540 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7c3894b-8400-492c-b4b9-b45ec555cc68" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.019122 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7c3894b-8400-492c-b4b9-b45ec555cc68" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.020119 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.022990 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.023286 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.023782 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.023937 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.026293 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.028016 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.028619 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.035907 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng"] Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.036185 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.060790 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.060874 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.060934 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.060967 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.061005 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.061075 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.061181 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.061234 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.061327 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.061384 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.061437 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 
23:39:12.061476 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.061516 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65b25\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-kube-api-access-65b25\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.163086 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.163149 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.163185 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65b25\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-kube-api-access-65b25\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.163236 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.163276 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.163306 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ceph\") 
pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.163335 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.164049 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.164124 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.164312 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.164645 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.164825 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.164909 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.167257 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.168479 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.168692 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.169405 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.171276 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.172247 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.172541 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.173498 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.175415 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.181815 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.193521 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.201362 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.201648 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65b25\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-kube-api-access-65b25\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-knvng\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:12 crc kubenswrapper[5045]: I1125 23:39:12.351422 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:13 crc kubenswrapper[5045]: W1125 23:39:13.025696 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4dcec4cb_07ca_4c0a_afaa_672e534cf521.slice/crio-f2e7aebbcb022fb79d46dce06d456b8ab64a648976cc7f02eb1181146a73ada1 WatchSource:0}: Error finding container f2e7aebbcb022fb79d46dce06d456b8ab64a648976cc7f02eb1181146a73ada1: Status 404 returned error can't find the container with id f2e7aebbcb022fb79d46dce06d456b8ab64a648976cc7f02eb1181146a73ada1 Nov 25 23:39:13 crc kubenswrapper[5045]: I1125 23:39:13.028938 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng"] Nov 25 23:39:13 crc kubenswrapper[5045]: I1125 23:39:13.928849 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" event={"ID":"4dcec4cb-07ca-4c0a-afaa-672e534cf521","Type":"ContainerStarted","Data":"f2e7aebbcb022fb79d46dce06d456b8ab64a648976cc7f02eb1181146a73ada1"} Nov 25 23:39:14 crc kubenswrapper[5045]: I1125 23:39:14.946099 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" event={"ID":"4dcec4cb-07ca-4c0a-afaa-672e534cf521","Type":"ContainerStarted","Data":"876465e2377490072c410e20dafbcaae8f488fa83b2c667b9baf36ce657ccd83"} Nov 25 23:39:14 crc kubenswrapper[5045]: I1125 23:39:14.987021 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" podStartSLOduration=3.219358323 podStartE2EDuration="3.986991817s" podCreationTimestamp="2025-11-25 23:39:11 +0000 UTC" firstStartedPulling="2025-11-25 23:39:13.030663381 +0000 UTC m=+2409.388322493" lastFinishedPulling="2025-11-25 23:39:13.798296875 +0000 UTC m=+2410.155955987" observedRunningTime="2025-11-25 23:39:14.97415035 +0000 UTC m=+2411.331809542" watchObservedRunningTime="2025-11-25 23:39:14.986991817 +0000 UTC m=+2411.344650969" Nov 25 23:39:24 crc kubenswrapper[5045]: I1125 23:39:24.397482 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:39:24 crc kubenswrapper[5045]: E1125 23:39:24.398768 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:39:39 crc kubenswrapper[5045]: I1125 23:39:39.397610 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:39:39 crc kubenswrapper[5045]: E1125 23:39:39.399522 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:39:52 crc kubenswrapper[5045]: 
I1125 23:39:52.391490 5045 generic.go:334] "Generic (PLEG): container finished" podID="4dcec4cb-07ca-4c0a-afaa-672e534cf521" containerID="876465e2377490072c410e20dafbcaae8f488fa83b2c667b9baf36ce657ccd83" exitCode=0 Nov 25 23:39:52 crc kubenswrapper[5045]: I1125 23:39:52.391619 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" event={"ID":"4dcec4cb-07ca-4c0a-afaa-672e534cf521","Type":"ContainerDied","Data":"876465e2377490072c410e20dafbcaae8f488fa83b2c667b9baf36ce657ccd83"} Nov 25 23:39:52 crc kubenswrapper[5045]: I1125 23:39:52.397109 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:39:52 crc kubenswrapper[5045]: E1125 23:39:52.397409 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.803109 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947045 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ovn-combined-ca-bundle\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947124 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-ovn-default-certs-0\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947254 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-neutron-metadata-combined-ca-bundle\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947286 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947344 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-bootstrap-combined-ca-bundle\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947379 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-inventory\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947413 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-libvirt-combined-ca-bundle\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947440 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947487 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ssh-key\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947544 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ceph\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947583 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65b25\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-kube-api-access-65b25\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947621 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-nova-combined-ca-bundle\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.947667 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-repo-setup-combined-ca-bundle\") pod \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\" (UID: \"4dcec4cb-07ca-4c0a-afaa-672e534cf521\") " Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.956398 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "nova-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.956435 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.957525 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.957574 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.957850 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.959651 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ceph" (OuterVolumeSpecName: "ceph") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.960542 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.961128 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.965009 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.965273 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.965356 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-kube-api-access-65b25" (OuterVolumeSpecName: "kube-api-access-65b25") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "kube-api-access-65b25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.987067 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-inventory" (OuterVolumeSpecName: "inventory") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:53 crc kubenswrapper[5045]: I1125 23:39:53.987768 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4dcec4cb-07ca-4c0a-afaa-672e534cf521" (UID: "4dcec4cb-07ca-4c0a-afaa-672e534cf521"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.050852 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.050920 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65b25\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-kube-api-access-65b25\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.050945 5045 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.050991 5045 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051008 5045 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051029 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051050 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051069 5045 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051093 5045 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051112 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051138 5045 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051155 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4dcec4cb-07ca-4c0a-afaa-672e534cf521-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" 
DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.051172 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dcec4cb-07ca-4c0a-afaa-672e534cf521-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.422509 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.424685 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-knvng" event={"ID":"4dcec4cb-07ca-4c0a-afaa-672e534cf521","Type":"ContainerDied","Data":"f2e7aebbcb022fb79d46dce06d456b8ab64a648976cc7f02eb1181146a73ada1"} Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.424873 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2e7aebbcb022fb79d46dce06d456b8ab64a648976cc7f02eb1181146a73ada1" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.567063 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524"] Nov 25 23:39:54 crc kubenswrapper[5045]: E1125 23:39:54.567529 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dcec4cb-07ca-4c0a-afaa-672e534cf521" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.567557 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dcec4cb-07ca-4c0a-afaa-672e534cf521" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.567776 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dcec4cb-07ca-4c0a-afaa-672e534cf521" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.568562 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.572611 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.574252 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.574425 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.574582 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.574829 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.577192 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524"] Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.769792 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.770069 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.770148 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp6jf\" (UniqueName: \"kubernetes.io/projected/a98dde15-bff6-4ed8-8216-142372401818-kube-api-access-mp6jf\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.770313 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.872129 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.872181 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.872327 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.872372 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp6jf\" (UniqueName: \"kubernetes.io/projected/a98dde15-bff6-4ed8-8216-142372401818-kube-api-access-mp6jf\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.878507 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.879477 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.882296 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:54 crc kubenswrapper[5045]: I1125 23:39:54.905838 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mp6jf\" (UniqueName: \"kubernetes.io/projected/a98dde15-bff6-4ed8-8216-142372401818-kube-api-access-mp6jf\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-52524\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:55 crc kubenswrapper[5045]: I1125 23:39:55.195474 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:39:55 crc kubenswrapper[5045]: I1125 23:39:55.853095 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524"] Nov 25 23:39:56 crc kubenswrapper[5045]: I1125 23:39:56.445772 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" event={"ID":"a98dde15-bff6-4ed8-8216-142372401818","Type":"ContainerStarted","Data":"fc000ce657f1d977a8ddba7014a93b0436a8cc41fe238013e2b6ccd4c3e29139"} Nov 25 23:39:57 crc kubenswrapper[5045]: I1125 23:39:57.455912 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" event={"ID":"a98dde15-bff6-4ed8-8216-142372401818","Type":"ContainerStarted","Data":"1430b8aa4a27ab33e212443084e1c6cf839b027172a44befa4e69bff44f7e07c"} Nov 25 23:39:57 crc kubenswrapper[5045]: I1125 23:39:57.487096 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" podStartSLOduration=2.805288084 podStartE2EDuration="3.48707048s" podCreationTimestamp="2025-11-25 23:39:54 +0000 UTC" firstStartedPulling="2025-11-25 23:39:55.866991374 +0000 UTC m=+2452.224650496" lastFinishedPulling="2025-11-25 23:39:56.54877374 +0000 UTC m=+2452.906432892" observedRunningTime="2025-11-25 23:39:57.473179574 +0000 UTC m=+2453.830838686" watchObservedRunningTime="2025-11-25 23:39:57.48707048 +0000 UTC m=+2453.844729632" Nov 25 23:40:03 crc kubenswrapper[5045]: I1125 23:40:03.520505 5045 generic.go:334] "Generic (PLEG): container finished" podID="a98dde15-bff6-4ed8-8216-142372401818" containerID="1430b8aa4a27ab33e212443084e1c6cf839b027172a44befa4e69bff44f7e07c" exitCode=0 Nov 25 23:40:03 crc kubenswrapper[5045]: I1125 23:40:03.520574 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" event={"ID":"a98dde15-bff6-4ed8-8216-142372401818","Type":"ContainerDied","Data":"1430b8aa4a27ab33e212443084e1c6cf839b027172a44befa4e69bff44f7e07c"} Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.051148 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.106255 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-inventory\") pod \"a98dde15-bff6-4ed8-8216-142372401818\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.106314 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ceph\") pod \"a98dde15-bff6-4ed8-8216-142372401818\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.106502 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ssh-key\") pod \"a98dde15-bff6-4ed8-8216-142372401818\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.106643 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mp6jf\" (UniqueName: \"kubernetes.io/projected/a98dde15-bff6-4ed8-8216-142372401818-kube-api-access-mp6jf\") pod \"a98dde15-bff6-4ed8-8216-142372401818\" (UID: \"a98dde15-bff6-4ed8-8216-142372401818\") " Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.112852 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ceph" (OuterVolumeSpecName: "ceph") pod "a98dde15-bff6-4ed8-8216-142372401818" (UID: "a98dde15-bff6-4ed8-8216-142372401818"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.112917 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a98dde15-bff6-4ed8-8216-142372401818-kube-api-access-mp6jf" (OuterVolumeSpecName: "kube-api-access-mp6jf") pod "a98dde15-bff6-4ed8-8216-142372401818" (UID: "a98dde15-bff6-4ed8-8216-142372401818"). InnerVolumeSpecName "kube-api-access-mp6jf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.145926 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-inventory" (OuterVolumeSpecName: "inventory") pod "a98dde15-bff6-4ed8-8216-142372401818" (UID: "a98dde15-bff6-4ed8-8216-142372401818"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.149981 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a98dde15-bff6-4ed8-8216-142372401818" (UID: "a98dde15-bff6-4ed8-8216-142372401818"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.211226 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.211263 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mp6jf\" (UniqueName: \"kubernetes.io/projected/a98dde15-bff6-4ed8-8216-142372401818-kube-api-access-mp6jf\") on node \"crc\" DevicePath \"\"" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.211279 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.211293 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a98dde15-bff6-4ed8-8216-142372401818-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.542777 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" event={"ID":"a98dde15-bff6-4ed8-8216-142372401818","Type":"ContainerDied","Data":"fc000ce657f1d977a8ddba7014a93b0436a8cc41fe238013e2b6ccd4c3e29139"} Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.542834 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc000ce657f1d977a8ddba7014a93b0436a8cc41fe238013e2b6ccd4c3e29139" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.542879 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-52524" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.663341 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n"] Nov 25 23:40:05 crc kubenswrapper[5045]: E1125 23:40:05.664360 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a98dde15-bff6-4ed8-8216-142372401818" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.664391 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a98dde15-bff6-4ed8-8216-142372401818" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.665087 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a98dde15-bff6-4ed8-8216-142372401818" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.670033 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.673420 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.673441 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.674205 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.674319 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.674496 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.675546 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.689036 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n"] Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.725706 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.725803 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.725843 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jkf7\" (UniqueName: \"kubernetes.io/projected/9951b664-25eb-49a9-ba49-6bd594f857df-kube-api-access-7jkf7\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.725933 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.725994 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 
23:40:05.726198 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9951b664-25eb-49a9-ba49-6bd594f857df-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.827573 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.827658 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.827749 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jkf7\" (UniqueName: \"kubernetes.io/projected/9951b664-25eb-49a9-ba49-6bd594f857df-kube-api-access-7jkf7\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.827815 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.827864 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.827907 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9951b664-25eb-49a9-ba49-6bd594f857df-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.829033 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9951b664-25eb-49a9-ba49-6bd594f857df-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.833345 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.834165 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.835730 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.837366 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:05 crc kubenswrapper[5045]: I1125 23:40:05.844981 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jkf7\" (UniqueName: \"kubernetes.io/projected/9951b664-25eb-49a9-ba49-6bd594f857df-kube-api-access-7jkf7\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-2p27n\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:06 crc kubenswrapper[5045]: I1125 23:40:06.001297 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:40:06 crc kubenswrapper[5045]: I1125 23:40:06.683520 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n"] Nov 25 23:40:06 crc kubenswrapper[5045]: W1125 23:40:06.705467 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9951b664_25eb_49a9_ba49_6bd594f857df.slice/crio-2c96090b680d9084faa086f49b2a0ee1a64de23f07fbdb3d68525088adaab400 WatchSource:0}: Error finding container 2c96090b680d9084faa086f49b2a0ee1a64de23f07fbdb3d68525088adaab400: Status 404 returned error can't find the container with id 2c96090b680d9084faa086f49b2a0ee1a64de23f07fbdb3d68525088adaab400 Nov 25 23:40:07 crc kubenswrapper[5045]: I1125 23:40:07.397283 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:40:07 crc kubenswrapper[5045]: E1125 23:40:07.397842 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:40:07 crc kubenswrapper[5045]: I1125 23:40:07.568093 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" event={"ID":"9951b664-25eb-49a9-ba49-6bd594f857df","Type":"ContainerStarted","Data":"5a140a11befc52aeec8b3b39c82a5fe0532773f90a510b260ba4e1f82fe7e0e9"} Nov 25 23:40:07 crc kubenswrapper[5045]: I1125 23:40:07.568153 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" event={"ID":"9951b664-25eb-49a9-ba49-6bd594f857df","Type":"ContainerStarted","Data":"2c96090b680d9084faa086f49b2a0ee1a64de23f07fbdb3d68525088adaab400"} Nov 25 23:40:07 crc kubenswrapper[5045]: I1125 23:40:07.604436 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" podStartSLOduration=2.096556542 podStartE2EDuration="2.604413341s" podCreationTimestamp="2025-11-25 23:40:05 +0000 UTC" firstStartedPulling="2025-11-25 23:40:06.709570809 +0000 UTC m=+2463.067229911" lastFinishedPulling="2025-11-25 23:40:07.217427588 +0000 UTC m=+2463.575086710" observedRunningTime="2025-11-25 23:40:07.591052802 +0000 UTC m=+2463.948711924" watchObservedRunningTime="2025-11-25 23:40:07.604413341 +0000 UTC m=+2463.962072463" Nov 25 23:40:19 crc kubenswrapper[5045]: I1125 23:40:19.396976 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:40:19 crc kubenswrapper[5045]: E1125 23:40:19.398365 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:40:30 crc kubenswrapper[5045]: I1125 23:40:30.396678 5045 scope.go:117] 
"RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:40:30 crc kubenswrapper[5045]: E1125 23:40:30.398084 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:40:45 crc kubenswrapper[5045]: I1125 23:40:45.397604 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:40:45 crc kubenswrapper[5045]: E1125 23:40:45.399052 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:41:00 crc kubenswrapper[5045]: I1125 23:41:00.401422 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:41:00 crc kubenswrapper[5045]: E1125 23:41:00.402252 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:41:12 crc kubenswrapper[5045]: I1125 23:41:12.404462 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:41:12 crc kubenswrapper[5045]: E1125 23:41:12.405685 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:41:24 crc kubenswrapper[5045]: I1125 23:41:24.402091 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:41:24 crc kubenswrapper[5045]: E1125 23:41:24.402922 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:41:29 crc kubenswrapper[5045]: I1125 23:41:29.960702 5045 generic.go:334] "Generic (PLEG): container finished" podID="9951b664-25eb-49a9-ba49-6bd594f857df" containerID="5a140a11befc52aeec8b3b39c82a5fe0532773f90a510b260ba4e1f82fe7e0e9" exitCode=0 Nov 25 23:41:29 crc 
kubenswrapper[5045]: I1125 23:41:29.960769 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" event={"ID":"9951b664-25eb-49a9-ba49-6bd594f857df","Type":"ContainerDied","Data":"5a140a11befc52aeec8b3b39c82a5fe0532773f90a510b260ba4e1f82fe7e0e9"} Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.422658 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.484420 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jkf7\" (UniqueName: \"kubernetes.io/projected/9951b664-25eb-49a9-ba49-6bd594f857df-kube-api-access-7jkf7\") pod \"9951b664-25eb-49a9-ba49-6bd594f857df\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.484854 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ceph\") pod \"9951b664-25eb-49a9-ba49-6bd594f857df\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.484956 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9951b664-25eb-49a9-ba49-6bd594f857df-ovncontroller-config-0\") pod \"9951b664-25eb-49a9-ba49-6bd594f857df\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.484992 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ovn-combined-ca-bundle\") pod \"9951b664-25eb-49a9-ba49-6bd594f857df\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.485070 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-inventory\") pod \"9951b664-25eb-49a9-ba49-6bd594f857df\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.485136 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ssh-key\") pod \"9951b664-25eb-49a9-ba49-6bd594f857df\" (UID: \"9951b664-25eb-49a9-ba49-6bd594f857df\") " Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.492143 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ceph" (OuterVolumeSpecName: "ceph") pod "9951b664-25eb-49a9-ba49-6bd594f857df" (UID: "9951b664-25eb-49a9-ba49-6bd594f857df"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.492189 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9951b664-25eb-49a9-ba49-6bd594f857df-kube-api-access-7jkf7" (OuterVolumeSpecName: "kube-api-access-7jkf7") pod "9951b664-25eb-49a9-ba49-6bd594f857df" (UID: "9951b664-25eb-49a9-ba49-6bd594f857df"). InnerVolumeSpecName "kube-api-access-7jkf7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.492535 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "9951b664-25eb-49a9-ba49-6bd594f857df" (UID: "9951b664-25eb-49a9-ba49-6bd594f857df"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.517559 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-inventory" (OuterVolumeSpecName: "inventory") pod "9951b664-25eb-49a9-ba49-6bd594f857df" (UID: "9951b664-25eb-49a9-ba49-6bd594f857df"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.518626 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9951b664-25eb-49a9-ba49-6bd594f857df" (UID: "9951b664-25eb-49a9-ba49-6bd594f857df"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.531325 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9951b664-25eb-49a9-ba49-6bd594f857df-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "9951b664-25eb-49a9-ba49-6bd594f857df" (UID: "9951b664-25eb-49a9-ba49-6bd594f857df"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.587834 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.587875 5045 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9951b664-25eb-49a9-ba49-6bd594f857df-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.587894 5045 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.587909 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.587924 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9951b664-25eb-49a9-ba49-6bd594f857df-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.587937 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jkf7\" (UniqueName: \"kubernetes.io/projected/9951b664-25eb-49a9-ba49-6bd594f857df-kube-api-access-7jkf7\") on node \"crc\" DevicePath \"\"" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.995607 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" event={"ID":"9951b664-25eb-49a9-ba49-6bd594f857df","Type":"ContainerDied","Data":"2c96090b680d9084faa086f49b2a0ee1a64de23f07fbdb3d68525088adaab400"} Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.996306 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c96090b680d9084faa086f49b2a0ee1a64de23f07fbdb3d68525088adaab400" Nov 25 23:41:31 crc kubenswrapper[5045]: I1125 23:41:31.995688 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-2p27n" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.097180 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6"] Nov 25 23:41:32 crc kubenswrapper[5045]: E1125 23:41:32.097861 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9951b664-25eb-49a9-ba49-6bd594f857df" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.097948 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="9951b664-25eb-49a9-ba49-6bd594f857df" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.098162 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="9951b664-25eb-49a9-ba49-6bd594f857df" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.098906 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.108775 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6"] Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.112887 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.113067 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.113197 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.113631 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.113961 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.114170 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.114322 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.200569 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.200650 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8j8f\" (UniqueName: \"kubernetes.io/projected/1f6014b1-5500-49d6-a729-dfb677b8a1cc-kube-api-access-x8j8f\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.200705 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.200823 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.200860 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.200879 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.201035 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.303067 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.303884 5045 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.304099 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8j8f\" (UniqueName: \"kubernetes.io/projected/1f6014b1-5500-49d6-a729-dfb677b8a1cc-kube-api-access-x8j8f\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.304218 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.304375 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.305320 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.305382 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.308825 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.309675 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.310506 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.310596 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.317063 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.318410 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.338230 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8j8f\" (UniqueName: \"kubernetes.io/projected/1f6014b1-5500-49d6-a729-dfb677b8a1cc-kube-api-access-x8j8f\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.427420 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:41:32 crc kubenswrapper[5045]: I1125 23:41:32.778978 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6"] Nov 25 23:41:33 crc kubenswrapper[5045]: I1125 23:41:33.007610 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" event={"ID":"1f6014b1-5500-49d6-a729-dfb677b8a1cc","Type":"ContainerStarted","Data":"fde79c2a0c92cf1a71a7878e7468c95d0ed041f0afd152addc0823f89e17ab35"} Nov 25 23:41:34 crc kubenswrapper[5045]: I1125 23:41:34.022284 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" event={"ID":"1f6014b1-5500-49d6-a729-dfb677b8a1cc","Type":"ContainerStarted","Data":"585104d1ac0be05a7b066c033d5c1a09f60d107cd1936e30ffa5909429635d11"} Nov 25 23:41:34 crc kubenswrapper[5045]: I1125 23:41:34.065816 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" podStartSLOduration=1.618272074 podStartE2EDuration="2.065775514s" podCreationTimestamp="2025-11-25 23:41:32 +0000 UTC" firstStartedPulling="2025-11-25 23:41:32.786197222 +0000 UTC m=+2549.143856374" lastFinishedPulling="2025-11-25 23:41:33.233700702 +0000 UTC m=+2549.591359814" observedRunningTime="2025-11-25 23:41:34.055174986 +0000 UTC m=+2550.412834138" watchObservedRunningTime="2025-11-25 23:41:34.065775514 +0000 UTC m=+2550.423434706" Nov 25 23:41:37 crc kubenswrapper[5045]: I1125 23:41:37.396994 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:41:37 crc kubenswrapper[5045]: E1125 23:41:37.397791 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.731026 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xrrmd"] Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.735823 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.746002 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xrrmd"] Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.891941 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-utilities\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.892406 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wph2\" (UniqueName: \"kubernetes.io/projected/f8e3366f-0e9f-4890-be50-025446a20168-kube-api-access-2wph2\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.892760 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-catalog-content\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.995280 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-utilities\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.995753 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wph2\" (UniqueName: \"kubernetes.io/projected/f8e3366f-0e9f-4890-be50-025446a20168-kube-api-access-2wph2\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.996053 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-catalog-content\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.996876 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-catalog-content\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:49 crc kubenswrapper[5045]: I1125 23:41:49.997365 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-utilities\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:50 crc kubenswrapper[5045]: I1125 23:41:50.023859 5045 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2wph2\" (UniqueName: \"kubernetes.io/projected/f8e3366f-0e9f-4890-be50-025446a20168-kube-api-access-2wph2\") pod \"community-operators-xrrmd\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:50 crc kubenswrapper[5045]: I1125 23:41:50.072753 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:41:50 crc kubenswrapper[5045]: I1125 23:41:50.616144 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xrrmd"] Nov 25 23:41:50 crc kubenswrapper[5045]: W1125 23:41:50.622139 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8e3366f_0e9f_4890_be50_025446a20168.slice/crio-df393cd0cae406898874bb120da17e5822e200aaab907764272e07790771fec0 WatchSource:0}: Error finding container df393cd0cae406898874bb120da17e5822e200aaab907764272e07790771fec0: Status 404 returned error can't find the container with id df393cd0cae406898874bb120da17e5822e200aaab907764272e07790771fec0 Nov 25 23:41:51 crc kubenswrapper[5045]: I1125 23:41:51.220624 5045 generic.go:334] "Generic (PLEG): container finished" podID="f8e3366f-0e9f-4890-be50-025446a20168" containerID="7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0" exitCode=0 Nov 25 23:41:51 crc kubenswrapper[5045]: I1125 23:41:51.220684 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xrrmd" event={"ID":"f8e3366f-0e9f-4890-be50-025446a20168","Type":"ContainerDied","Data":"7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0"} Nov 25 23:41:51 crc kubenswrapper[5045]: I1125 23:41:51.220758 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xrrmd" event={"ID":"f8e3366f-0e9f-4890-be50-025446a20168","Type":"ContainerStarted","Data":"df393cd0cae406898874bb120da17e5822e200aaab907764272e07790771fec0"} Nov 25 23:41:51 crc kubenswrapper[5045]: I1125 23:41:51.399222 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:41:51 crc kubenswrapper[5045]: E1125 23:41:51.399669 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:41:53 crc kubenswrapper[5045]: I1125 23:41:53.246514 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xrrmd" event={"ID":"f8e3366f-0e9f-4890-be50-025446a20168","Type":"ContainerStarted","Data":"12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a"} Nov 25 23:41:54 crc kubenswrapper[5045]: I1125 23:41:54.259317 5045 generic.go:334] "Generic (PLEG): container finished" podID="f8e3366f-0e9f-4890-be50-025446a20168" containerID="12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a" exitCode=0 Nov 25 23:41:54 crc kubenswrapper[5045]: I1125 23:41:54.259410 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xrrmd" 
event={"ID":"f8e3366f-0e9f-4890-be50-025446a20168","Type":"ContainerDied","Data":"12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a"} Nov 25 23:41:55 crc kubenswrapper[5045]: I1125 23:41:55.294686 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xrrmd" event={"ID":"f8e3366f-0e9f-4890-be50-025446a20168","Type":"ContainerStarted","Data":"709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6"} Nov 25 23:41:55 crc kubenswrapper[5045]: I1125 23:41:55.335135 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xrrmd" podStartSLOduration=2.871516151 podStartE2EDuration="6.335106358s" podCreationTimestamp="2025-11-25 23:41:49 +0000 UTC" firstStartedPulling="2025-11-25 23:41:51.222822666 +0000 UTC m=+2567.580481818" lastFinishedPulling="2025-11-25 23:41:54.686412873 +0000 UTC m=+2571.044072025" observedRunningTime="2025-11-25 23:41:55.333957925 +0000 UTC m=+2571.691617047" watchObservedRunningTime="2025-11-25 23:41:55.335106358 +0000 UTC m=+2571.692765480" Nov 25 23:42:00 crc kubenswrapper[5045]: I1125 23:42:00.073355 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:42:00 crc kubenswrapper[5045]: I1125 23:42:00.073779 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:42:00 crc kubenswrapper[5045]: I1125 23:42:00.157305 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:42:00 crc kubenswrapper[5045]: I1125 23:42:00.445457 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:42:00 crc kubenswrapper[5045]: I1125 23:42:00.518063 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xrrmd"] Nov 25 23:42:02 crc kubenswrapper[5045]: I1125 23:42:02.386882 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xrrmd" podUID="f8e3366f-0e9f-4890-be50-025446a20168" containerName="registry-server" containerID="cri-o://709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6" gracePeriod=2 Nov 25 23:42:02 crc kubenswrapper[5045]: I1125 23:42:02.925829 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.094087 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-utilities\") pod \"f8e3366f-0e9f-4890-be50-025446a20168\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.094357 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-catalog-content\") pod \"f8e3366f-0e9f-4890-be50-025446a20168\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.094387 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wph2\" (UniqueName: \"kubernetes.io/projected/f8e3366f-0e9f-4890-be50-025446a20168-kube-api-access-2wph2\") pod \"f8e3366f-0e9f-4890-be50-025446a20168\" (UID: \"f8e3366f-0e9f-4890-be50-025446a20168\") " Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.095716 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-utilities" (OuterVolumeSpecName: "utilities") pod "f8e3366f-0e9f-4890-be50-025446a20168" (UID: "f8e3366f-0e9f-4890-be50-025446a20168"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.143051 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8e3366f-0e9f-4890-be50-025446a20168-kube-api-access-2wph2" (OuterVolumeSpecName: "kube-api-access-2wph2") pod "f8e3366f-0e9f-4890-be50-025446a20168" (UID: "f8e3366f-0e9f-4890-be50-025446a20168"). InnerVolumeSpecName "kube-api-access-2wph2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.177706 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f8e3366f-0e9f-4890-be50-025446a20168" (UID: "f8e3366f-0e9f-4890-be50-025446a20168"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.197114 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.197162 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wph2\" (UniqueName: \"kubernetes.io/projected/f8e3366f-0e9f-4890-be50-025446a20168-kube-api-access-2wph2\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.197187 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8e3366f-0e9f-4890-be50-025446a20168-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.408616 5045 generic.go:334] "Generic (PLEG): container finished" podID="f8e3366f-0e9f-4890-be50-025446a20168" containerID="709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6" exitCode=0 Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.408697 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xrrmd" event={"ID":"f8e3366f-0e9f-4890-be50-025446a20168","Type":"ContainerDied","Data":"709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6"} Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.408932 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xrrmd" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.411686 5045 scope.go:117] "RemoveContainer" containerID="709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.411544 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xrrmd" event={"ID":"f8e3366f-0e9f-4890-be50-025446a20168","Type":"ContainerDied","Data":"df393cd0cae406898874bb120da17e5822e200aaab907764272e07790771fec0"} Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.452622 5045 scope.go:117] "RemoveContainer" containerID="12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.472463 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xrrmd"] Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.486484 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xrrmd"] Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.497071 5045 scope.go:117] "RemoveContainer" containerID="7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.575480 5045 scope.go:117] "RemoveContainer" containerID="709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6" Nov 25 23:42:03 crc kubenswrapper[5045]: E1125 23:42:03.576323 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6\": container with ID starting with 709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6 not found: ID does not exist" containerID="709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.576375 
5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6"} err="failed to get container status \"709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6\": rpc error: code = NotFound desc = could not find container \"709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6\": container with ID starting with 709c40420baec9227461053d4bbc957ca6de3cdda0e1d50ba24e6b9f7706fba6 not found: ID does not exist" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.576411 5045 scope.go:117] "RemoveContainer" containerID="12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a" Nov 25 23:42:03 crc kubenswrapper[5045]: E1125 23:42:03.576868 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a\": container with ID starting with 12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a not found: ID does not exist" containerID="12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.576911 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a"} err="failed to get container status \"12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a\": rpc error: code = NotFound desc = could not find container \"12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a\": container with ID starting with 12e577617fe749aca100f956c868d3baa49a91ceabbbf7850bb1a260fbaba31a not found: ID does not exist" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.576943 5045 scope.go:117] "RemoveContainer" containerID="7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0" Nov 25 23:42:03 crc kubenswrapper[5045]: E1125 23:42:03.577360 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0\": container with ID starting with 7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0 not found: ID does not exist" containerID="7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0" Nov 25 23:42:03 crc kubenswrapper[5045]: I1125 23:42:03.577389 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0"} err="failed to get container status \"7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0\": rpc error: code = NotFound desc = could not find container \"7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0\": container with ID starting with 7d642959575f899768ca7789b36eab9b710c4a0f6761c8cefc9db0cc2a4624a0 not found: ID does not exist" Nov 25 23:42:03 crc kubenswrapper[5045]: E1125 23:42:03.614496 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8e3366f_0e9f_4890_be50_025446a20168.slice\": RecentStats: unable to find data in memory cache]" Nov 25 23:42:04 crc kubenswrapper[5045]: I1125 23:42:04.409504 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:42:04 crc 
kubenswrapper[5045]: I1125 23:42:04.410032 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8e3366f-0e9f-4890-be50-025446a20168" path="/var/lib/kubelet/pods/f8e3366f-0e9f-4890-be50-025446a20168/volumes" Nov 25 23:42:05 crc kubenswrapper[5045]: I1125 23:42:05.442684 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"f237bb0a3155d2bd6d4d6edb5b75e2e10e60f5f39cb1458cea502418fd177a1d"} Nov 25 23:42:39 crc kubenswrapper[5045]: I1125 23:42:39.861405 5045 generic.go:334] "Generic (PLEG): container finished" podID="1f6014b1-5500-49d6-a729-dfb677b8a1cc" containerID="585104d1ac0be05a7b066c033d5c1a09f60d107cd1936e30ffa5909429635d11" exitCode=0 Nov 25 23:42:39 crc kubenswrapper[5045]: I1125 23:42:39.861579 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" event={"ID":"1f6014b1-5500-49d6-a729-dfb677b8a1cc","Type":"ContainerDied","Data":"585104d1ac0be05a7b066c033d5c1a09f60d107cd1936e30ffa5909429635d11"} Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.364518 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.427974 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8j8f\" (UniqueName: \"kubernetes.io/projected/1f6014b1-5500-49d6-a729-dfb677b8a1cc-kube-api-access-x8j8f\") pod \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.428103 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-metadata-combined-ca-bundle\") pod \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.428180 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ceph\") pod \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.428237 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-nova-metadata-neutron-config-0\") pod \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.428357 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-inventory\") pod \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.428503 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-ovn-metadata-agent-neutron-config-0\") pod 
\"1f6014b1-5500-49d6-a729-dfb677b8a1cc\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.428581 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ssh-key\") pod \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\" (UID: \"1f6014b1-5500-49d6-a729-dfb677b8a1cc\") " Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.441278 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f6014b1-5500-49d6-a729-dfb677b8a1cc-kube-api-access-x8j8f" (OuterVolumeSpecName: "kube-api-access-x8j8f") pod "1f6014b1-5500-49d6-a729-dfb677b8a1cc" (UID: "1f6014b1-5500-49d6-a729-dfb677b8a1cc"). InnerVolumeSpecName "kube-api-access-x8j8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.441357 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "1f6014b1-5500-49d6-a729-dfb677b8a1cc" (UID: "1f6014b1-5500-49d6-a729-dfb677b8a1cc"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.443396 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ceph" (OuterVolumeSpecName: "ceph") pod "1f6014b1-5500-49d6-a729-dfb677b8a1cc" (UID: "1f6014b1-5500-49d6-a729-dfb677b8a1cc"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.487204 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "1f6014b1-5500-49d6-a729-dfb677b8a1cc" (UID: "1f6014b1-5500-49d6-a729-dfb677b8a1cc"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.492449 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1f6014b1-5500-49d6-a729-dfb677b8a1cc" (UID: "1f6014b1-5500-49d6-a729-dfb677b8a1cc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.496991 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-inventory" (OuterVolumeSpecName: "inventory") pod "1f6014b1-5500-49d6-a729-dfb677b8a1cc" (UID: "1f6014b1-5500-49d6-a729-dfb677b8a1cc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.503157 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "1f6014b1-5500-49d6-a729-dfb677b8a1cc" (UID: "1f6014b1-5500-49d6-a729-dfb677b8a1cc"). 
InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.530587 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8j8f\" (UniqueName: \"kubernetes.io/projected/1f6014b1-5500-49d6-a729-dfb677b8a1cc-kube-api-access-x8j8f\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.530614 5045 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.530624 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.530633 5045 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.530643 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.530652 5045 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.530662 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f6014b1-5500-49d6-a729-dfb677b8a1cc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.887997 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" event={"ID":"1f6014b1-5500-49d6-a729-dfb677b8a1cc","Type":"ContainerDied","Data":"fde79c2a0c92cf1a71a7878e7468c95d0ed041f0afd152addc0823f89e17ab35"} Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.888062 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fde79c2a0c92cf1a71a7878e7468c95d0ed041f0afd152addc0823f89e17ab35" Nov 25 23:42:41 crc kubenswrapper[5045]: I1125 23:42:41.888142 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.046995 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5"] Nov 25 23:42:42 crc kubenswrapper[5045]: E1125 23:42:42.047496 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8e3366f-0e9f-4890-be50-025446a20168" containerName="registry-server" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.047525 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8e3366f-0e9f-4890-be50-025446a20168" containerName="registry-server" Nov 25 23:42:42 crc kubenswrapper[5045]: E1125 23:42:42.047580 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8e3366f-0e9f-4890-be50-025446a20168" containerName="extract-utilities" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.047596 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8e3366f-0e9f-4890-be50-025446a20168" containerName="extract-utilities" Nov 25 23:42:42 crc kubenswrapper[5045]: E1125 23:42:42.047615 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8e3366f-0e9f-4890-be50-025446a20168" containerName="extract-content" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.047628 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8e3366f-0e9f-4890-be50-025446a20168" containerName="extract-content" Nov 25 23:42:42 crc kubenswrapper[5045]: E1125 23:42:42.047656 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f6014b1-5500-49d6-a729-dfb677b8a1cc" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.047670 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f6014b1-5500-49d6-a729-dfb677b8a1cc" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.048030 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8e3366f-0e9f-4890-be50-025446a20168" containerName="registry-server" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.048084 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f6014b1-5500-49d6-a729-dfb677b8a1cc" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.048996 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.051436 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.052010 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.052060 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.052178 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.052236 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.068884 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.070078 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5"] Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.144956 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.145512 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.145701 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72p7h\" (UniqueName: \"kubernetes.io/projected/886ae822-5a4e-4578-a137-1322687c1a77-kube-api-access-72p7h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.145971 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.146043 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " 
pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.146139 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.247788 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.247856 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72p7h\" (UniqueName: \"kubernetes.io/projected/886ae822-5a4e-4578-a137-1322687c1a77-kube-api-access-72p7h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.247902 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.247934 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.247959 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.248022 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.253451 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.254064 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.254632 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.254886 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.254894 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.269330 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72p7h\" (UniqueName: \"kubernetes.io/projected/886ae822-5a4e-4578-a137-1322687c1a77-kube-api-access-72p7h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:42 crc kubenswrapper[5045]: I1125 23:42:42.412535 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:42:43 crc kubenswrapper[5045]: W1125 23:42:43.073917 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod886ae822_5a4e_4578_a137_1322687c1a77.slice/crio-5c67dc67831aff5b3a57274e6e6fad38b01d832b581acdcb72d18b7d6f314458 WatchSource:0}: Error finding container 5c67dc67831aff5b3a57274e6e6fad38b01d832b581acdcb72d18b7d6f314458: Status 404 returned error can't find the container with id 5c67dc67831aff5b3a57274e6e6fad38b01d832b581acdcb72d18b7d6f314458 Nov 25 23:42:43 crc kubenswrapper[5045]: I1125 23:42:43.088409 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5"] Nov 25 23:42:43 crc kubenswrapper[5045]: I1125 23:42:43.915820 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" event={"ID":"886ae822-5a4e-4578-a137-1322687c1a77","Type":"ContainerStarted","Data":"5c67dc67831aff5b3a57274e6e6fad38b01d832b581acdcb72d18b7d6f314458"} Nov 25 23:42:44 crc kubenswrapper[5045]: I1125 23:42:44.925967 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" event={"ID":"886ae822-5a4e-4578-a137-1322687c1a77","Type":"ContainerStarted","Data":"ad3bea810d2c4ee7bfe72254bce7da94b4981f4f71f26f21682800d45f99ea08"} Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.568449 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" podStartSLOduration=100.1185726 podStartE2EDuration="1m41.568425572s" podCreationTimestamp="2025-11-25 23:42:42 +0000 UTC" firstStartedPulling="2025-11-25 23:42:43.075823827 +0000 UTC m=+2619.433482949" lastFinishedPulling="2025-11-25 23:42:44.525676799 +0000 UTC m=+2620.883335921" observedRunningTime="2025-11-25 23:42:44.947116411 +0000 UTC m=+2621.304775563" watchObservedRunningTime="2025-11-25 23:44:23.568425572 +0000 UTC m=+2719.926084684" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.576429 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tjvdv"] Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.578516 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.598619 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjvdv"] Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.637808 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzcpv\" (UniqueName: \"kubernetes.io/projected/821efee1-b367-496a-8340-c36337d39be7-kube-api-access-mzcpv\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.637893 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-catalog-content\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.637983 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-utilities\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.739970 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-utilities\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.740221 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzcpv\" (UniqueName: \"kubernetes.io/projected/821efee1-b367-496a-8340-c36337d39be7-kube-api-access-mzcpv\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.740265 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-catalog-content\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.740551 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-utilities\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.740580 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-catalog-content\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.763766 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mzcpv\" (UniqueName: \"kubernetes.io/projected/821efee1-b367-496a-8340-c36337d39be7-kube-api-access-mzcpv\") pod \"redhat-marketplace-tjvdv\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:23 crc kubenswrapper[5045]: I1125 23:44:23.907765 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:24 crc kubenswrapper[5045]: I1125 23:44:24.394728 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjvdv"] Nov 25 23:44:24 crc kubenswrapper[5045]: W1125 23:44:24.424064 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod821efee1_b367_496a_8340_c36337d39be7.slice/crio-e068009004f8b1b0752bef5acfda948a7178546cffbe371200984cacf5c239eb WatchSource:0}: Error finding container e068009004f8b1b0752bef5acfda948a7178546cffbe371200984cacf5c239eb: Status 404 returned error can't find the container with id e068009004f8b1b0752bef5acfda948a7178546cffbe371200984cacf5c239eb Nov 25 23:44:25 crc kubenswrapper[5045]: I1125 23:44:25.113401 5045 generic.go:334] "Generic (PLEG): container finished" podID="821efee1-b367-496a-8340-c36337d39be7" containerID="269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f" exitCode=0 Nov 25 23:44:25 crc kubenswrapper[5045]: I1125 23:44:25.113481 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjvdv" event={"ID":"821efee1-b367-496a-8340-c36337d39be7","Type":"ContainerDied","Data":"269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f"} Nov 25 23:44:25 crc kubenswrapper[5045]: I1125 23:44:25.113765 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjvdv" event={"ID":"821efee1-b367-496a-8340-c36337d39be7","Type":"ContainerStarted","Data":"e068009004f8b1b0752bef5acfda948a7178546cffbe371200984cacf5c239eb"} Nov 25 23:44:25 crc kubenswrapper[5045]: I1125 23:44:25.117914 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:44:27 crc kubenswrapper[5045]: I1125 23:44:27.132605 5045 generic.go:334] "Generic (PLEG): container finished" podID="821efee1-b367-496a-8340-c36337d39be7" containerID="c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e" exitCode=0 Nov 25 23:44:27 crc kubenswrapper[5045]: I1125 23:44:27.132699 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjvdv" event={"ID":"821efee1-b367-496a-8340-c36337d39be7","Type":"ContainerDied","Data":"c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e"} Nov 25 23:44:28 crc kubenswrapper[5045]: I1125 23:44:28.144486 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjvdv" event={"ID":"821efee1-b367-496a-8340-c36337d39be7","Type":"ContainerStarted","Data":"23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268"} Nov 25 23:44:28 crc kubenswrapper[5045]: I1125 23:44:28.173019 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tjvdv" podStartSLOduration=2.5942197460000003 podStartE2EDuration="5.172999595s" podCreationTimestamp="2025-11-25 23:44:23 +0000 UTC" firstStartedPulling="2025-11-25 23:44:25.117486852 +0000 UTC m=+2721.475146004" 
lastFinishedPulling="2025-11-25 23:44:27.696266731 +0000 UTC m=+2724.053925853" observedRunningTime="2025-11-25 23:44:28.165118056 +0000 UTC m=+2724.522777208" watchObservedRunningTime="2025-11-25 23:44:28.172999595 +0000 UTC m=+2724.530658707" Nov 25 23:44:30 crc kubenswrapper[5045]: I1125 23:44:30.540575 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:44:30 crc kubenswrapper[5045]: I1125 23:44:30.540902 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:44:33 crc kubenswrapper[5045]: I1125 23:44:33.908941 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:33 crc kubenswrapper[5045]: I1125 23:44:33.909446 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:33 crc kubenswrapper[5045]: I1125 23:44:33.993138 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:34 crc kubenswrapper[5045]: I1125 23:44:34.267193 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:34 crc kubenswrapper[5045]: I1125 23:44:34.326573 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjvdv"] Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.238828 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tjvdv" podUID="821efee1-b367-496a-8340-c36337d39be7" containerName="registry-server" containerID="cri-o://23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268" gracePeriod=2 Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.656504 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.847002 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-utilities\") pod \"821efee1-b367-496a-8340-c36337d39be7\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.847129 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzcpv\" (UniqueName: \"kubernetes.io/projected/821efee1-b367-496a-8340-c36337d39be7-kube-api-access-mzcpv\") pod \"821efee1-b367-496a-8340-c36337d39be7\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.847167 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-catalog-content\") pod \"821efee1-b367-496a-8340-c36337d39be7\" (UID: \"821efee1-b367-496a-8340-c36337d39be7\") " Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.848385 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-utilities" (OuterVolumeSpecName: "utilities") pod "821efee1-b367-496a-8340-c36337d39be7" (UID: "821efee1-b367-496a-8340-c36337d39be7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.860400 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/821efee1-b367-496a-8340-c36337d39be7-kube-api-access-mzcpv" (OuterVolumeSpecName: "kube-api-access-mzcpv") pod "821efee1-b367-496a-8340-c36337d39be7" (UID: "821efee1-b367-496a-8340-c36337d39be7"). InnerVolumeSpecName "kube-api-access-mzcpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.884745 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "821efee1-b367-496a-8340-c36337d39be7" (UID: "821efee1-b367-496a-8340-c36337d39be7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.949497 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.949541 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzcpv\" (UniqueName: \"kubernetes.io/projected/821efee1-b367-496a-8340-c36337d39be7-kube-api-access-mzcpv\") on node \"crc\" DevicePath \"\"" Nov 25 23:44:36 crc kubenswrapper[5045]: I1125 23:44:36.949556 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/821efee1-b367-496a-8340-c36337d39be7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.250818 5045 generic.go:334] "Generic (PLEG): container finished" podID="821efee1-b367-496a-8340-c36337d39be7" containerID="23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268" exitCode=0 Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.250908 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjvdv" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.250928 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjvdv" event={"ID":"821efee1-b367-496a-8340-c36337d39be7","Type":"ContainerDied","Data":"23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268"} Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.251729 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjvdv" event={"ID":"821efee1-b367-496a-8340-c36337d39be7","Type":"ContainerDied","Data":"e068009004f8b1b0752bef5acfda948a7178546cffbe371200984cacf5c239eb"} Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.251752 5045 scope.go:117] "RemoveContainer" containerID="23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.306804 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjvdv"] Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.323886 5045 scope.go:117] "RemoveContainer" containerID="c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.334170 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjvdv"] Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.388811 5045 scope.go:117] "RemoveContainer" containerID="269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.408937 5045 scope.go:117] "RemoveContainer" containerID="23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268" Nov 25 23:44:37 crc kubenswrapper[5045]: E1125 23:44:37.409258 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268\": container with ID starting with 23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268 not found: ID does not exist" containerID="23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.409297 5045 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268"} err="failed to get container status \"23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268\": rpc error: code = NotFound desc = could not find container \"23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268\": container with ID starting with 23006abc51f90c03f694c595b91287614e2e6708f8e0d4764e0edfeadefdf268 not found: ID does not exist" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.409324 5045 scope.go:117] "RemoveContainer" containerID="c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e" Nov 25 23:44:37 crc kubenswrapper[5045]: E1125 23:44:37.409677 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e\": container with ID starting with c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e not found: ID does not exist" containerID="c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.409697 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e"} err="failed to get container status \"c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e\": rpc error: code = NotFound desc = could not find container \"c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e\": container with ID starting with c19806bc1658b6d2aa4295091f1771f80fdc12859f54ba053111beb1af03546e not found: ID does not exist" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.409722 5045 scope.go:117] "RemoveContainer" containerID="269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f" Nov 25 23:44:37 crc kubenswrapper[5045]: E1125 23:44:37.454539 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f\": container with ID starting with 269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f not found: ID does not exist" containerID="269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f" Nov 25 23:44:37 crc kubenswrapper[5045]: I1125 23:44:37.454573 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f"} err="failed to get container status \"269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f\": rpc error: code = NotFound desc = could not find container \"269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f\": container with ID starting with 269bc7a191d35e22493ddb0ba911ce451f5071b9e36bc8e9f3c303cc91ea0b0f not found: ID does not exist" Nov 25 23:44:38 crc kubenswrapper[5045]: I1125 23:44:38.407817 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="821efee1-b367-496a-8340-c36337d39be7" path="/var/lib/kubelet/pods/821efee1-b367-496a-8340-c36337d39be7/volumes" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.870460 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bw9d7"] Nov 25 23:44:43 crc kubenswrapper[5045]: E1125 23:44:43.871742 5045 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="821efee1-b367-496a-8340-c36337d39be7" containerName="extract-content" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.871765 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="821efee1-b367-496a-8340-c36337d39be7" containerName="extract-content" Nov 25 23:44:43 crc kubenswrapper[5045]: E1125 23:44:43.871805 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="821efee1-b367-496a-8340-c36337d39be7" containerName="extract-utilities" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.871819 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="821efee1-b367-496a-8340-c36337d39be7" containerName="extract-utilities" Nov 25 23:44:43 crc kubenswrapper[5045]: E1125 23:44:43.871863 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="821efee1-b367-496a-8340-c36337d39be7" containerName="registry-server" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.871881 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="821efee1-b367-496a-8340-c36337d39be7" containerName="registry-server" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.872252 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="821efee1-b367-496a-8340-c36337d39be7" containerName="registry-server" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.874645 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.882628 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bw9d7"] Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.984585 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-utilities\") pod \"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.984779 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-catalog-content\") pod \"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:43 crc kubenswrapper[5045]: I1125 23:44:43.985006 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgnpc\" (UniqueName: \"kubernetes.io/projected/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-kube-api-access-dgnpc\") pod \"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:44 crc kubenswrapper[5045]: I1125 23:44:44.086894 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-utilities\") pod \"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:44 crc kubenswrapper[5045]: I1125 23:44:44.086966 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-catalog-content\") pod 
\"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:44 crc kubenswrapper[5045]: I1125 23:44:44.087044 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgnpc\" (UniqueName: \"kubernetes.io/projected/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-kube-api-access-dgnpc\") pod \"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:44 crc kubenswrapper[5045]: I1125 23:44:44.087508 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-utilities\") pod \"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:44 crc kubenswrapper[5045]: I1125 23:44:44.087569 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-catalog-content\") pod \"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:44 crc kubenswrapper[5045]: I1125 23:44:44.106597 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgnpc\" (UniqueName: \"kubernetes.io/projected/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-kube-api-access-dgnpc\") pod \"certified-operators-bw9d7\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:44 crc kubenswrapper[5045]: I1125 23:44:44.203879 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:44 crc kubenswrapper[5045]: I1125 23:44:44.709943 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bw9d7"] Nov 25 23:44:45 crc kubenswrapper[5045]: I1125 23:44:45.336149 5045 generic.go:334] "Generic (PLEG): container finished" podID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerID="b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc" exitCode=0 Nov 25 23:44:45 crc kubenswrapper[5045]: I1125 23:44:45.336419 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bw9d7" event={"ID":"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2","Type":"ContainerDied","Data":"b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc"} Nov 25 23:44:45 crc kubenswrapper[5045]: I1125 23:44:45.336447 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bw9d7" event={"ID":"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2","Type":"ContainerStarted","Data":"ed995caf748957fd784b90df3c0ad12f18a893cdbab82d1457bd9637c459dd82"} Nov 25 23:44:46 crc kubenswrapper[5045]: I1125 23:44:46.345835 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bw9d7" event={"ID":"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2","Type":"ContainerStarted","Data":"b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938"} Nov 25 23:44:47 crc kubenswrapper[5045]: I1125 23:44:47.356607 5045 generic.go:334] "Generic (PLEG): container finished" podID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerID="b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938" exitCode=0 Nov 25 23:44:47 crc kubenswrapper[5045]: I1125 23:44:47.356745 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bw9d7" event={"ID":"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2","Type":"ContainerDied","Data":"b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938"} Nov 25 23:44:48 crc kubenswrapper[5045]: I1125 23:44:48.384413 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bw9d7" event={"ID":"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2","Type":"ContainerStarted","Data":"8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb"} Nov 25 23:44:48 crc kubenswrapper[5045]: I1125 23:44:48.417090 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bw9d7" podStartSLOduration=2.864478712 podStartE2EDuration="5.417068504s" podCreationTimestamp="2025-11-25 23:44:43 +0000 UTC" firstStartedPulling="2025-11-25 23:44:45.338100897 +0000 UTC m=+2741.695760019" lastFinishedPulling="2025-11-25 23:44:47.890690689 +0000 UTC m=+2744.248349811" observedRunningTime="2025-11-25 23:44:48.405069999 +0000 UTC m=+2744.762729111" watchObservedRunningTime="2025-11-25 23:44:48.417068504 +0000 UTC m=+2744.774727626" Nov 25 23:44:54 crc kubenswrapper[5045]: I1125 23:44:54.204406 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:54 crc kubenswrapper[5045]: I1125 23:44:54.205122 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:54 crc kubenswrapper[5045]: I1125 23:44:54.294470 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:54 crc kubenswrapper[5045]: I1125 23:44:54.499906 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:54 crc kubenswrapper[5045]: I1125 23:44:54.786374 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bw9d7"] Nov 25 23:44:56 crc kubenswrapper[5045]: I1125 23:44:56.480850 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bw9d7" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerName="registry-server" containerID="cri-o://8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb" gracePeriod=2 Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.042850 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.171528 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgnpc\" (UniqueName: \"kubernetes.io/projected/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-kube-api-access-dgnpc\") pod \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.171602 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-utilities\") pod \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.171759 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-catalog-content\") pod \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\" (UID: \"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2\") " Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.172589 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-utilities" (OuterVolumeSpecName: "utilities") pod "27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" (UID: "27cdda64-74b9-43cb-bcf5-b8ba1425a3f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.183308 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-kube-api-access-dgnpc" (OuterVolumeSpecName: "kube-api-access-dgnpc") pod "27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" (UID: "27cdda64-74b9-43cb-bcf5-b8ba1425a3f2"). InnerVolumeSpecName "kube-api-access-dgnpc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.273761 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgnpc\" (UniqueName: \"kubernetes.io/projected/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-kube-api-access-dgnpc\") on node \"crc\" DevicePath \"\"" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.273789 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.527100 5045 generic.go:334] "Generic (PLEG): container finished" podID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerID="8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb" exitCode=0 Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.527159 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bw9d7" event={"ID":"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2","Type":"ContainerDied","Data":"8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb"} Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.527412 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bw9d7" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.528885 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bw9d7" event={"ID":"27cdda64-74b9-43cb-bcf5-b8ba1425a3f2","Type":"ContainerDied","Data":"ed995caf748957fd784b90df3c0ad12f18a893cdbab82d1457bd9637c459dd82"} Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.528998 5045 scope.go:117] "RemoveContainer" containerID="8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.555350 5045 scope.go:117] "RemoveContainer" containerID="b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.579963 5045 scope.go:117] "RemoveContainer" containerID="b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.633949 5045 scope.go:117] "RemoveContainer" containerID="8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb" Nov 25 23:44:57 crc kubenswrapper[5045]: E1125 23:44:57.634584 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb\": container with ID starting with 8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb not found: ID does not exist" containerID="8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.634688 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb"} err="failed to get container status \"8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb\": rpc error: code = NotFound desc = could not find container \"8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb\": container with ID starting with 8de108fb9a4f1247c68ff25c0f5db1c1332a4de87eaab629a2aa71f0a9d2c7eb not found: ID does not exist" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.634802 5045 scope.go:117] 
"RemoveContainer" containerID="b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938" Nov 25 23:44:57 crc kubenswrapper[5045]: E1125 23:44:57.635242 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938\": container with ID starting with b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938 not found: ID does not exist" containerID="b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.635298 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938"} err="failed to get container status \"b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938\": rpc error: code = NotFound desc = could not find container \"b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938\": container with ID starting with b181aee141f8283266a6f0ed8b613fa50113c1fa49a55c2f63db47b99b534938 not found: ID does not exist" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.635336 5045 scope.go:117] "RemoveContainer" containerID="b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc" Nov 25 23:44:57 crc kubenswrapper[5045]: E1125 23:44:57.635793 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc\": container with ID starting with b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc not found: ID does not exist" containerID="b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.635852 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc"} err="failed to get container status \"b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc\": rpc error: code = NotFound desc = could not find container \"b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc\": container with ID starting with b48751ffd5c1cba5dd7c25eceae6292a0849967ada7c0c83b1ac2141cbc254fc not found: ID does not exist" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.909971 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" (UID: "27cdda64-74b9-43cb-bcf5-b8ba1425a3f2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:44:57 crc kubenswrapper[5045]: I1125 23:44:57.927237 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:44:58 crc kubenswrapper[5045]: I1125 23:44:58.163686 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bw9d7"] Nov 25 23:44:58 crc kubenswrapper[5045]: I1125 23:44:58.172842 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bw9d7"] Nov 25 23:44:58 crc kubenswrapper[5045]: I1125 23:44:58.413017 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" path="/var/lib/kubelet/pods/27cdda64-74b9-43cb-bcf5-b8ba1425a3f2/volumes" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.176889 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq"] Nov 25 23:45:00 crc kubenswrapper[5045]: E1125 23:45:00.177805 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerName="extract-content" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.177828 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerName="extract-content" Nov 25 23:45:00 crc kubenswrapper[5045]: E1125 23:45:00.177870 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerName="registry-server" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.177883 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerName="registry-server" Nov 25 23:45:00 crc kubenswrapper[5045]: E1125 23:45:00.177928 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerName="extract-utilities" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.177941 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerName="extract-utilities" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.178228 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="27cdda64-74b9-43cb-bcf5-b8ba1425a3f2" containerName="registry-server" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.179271 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.185501 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.186442 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.192422 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq"] Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.270704 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfr6p\" (UniqueName: \"kubernetes.io/projected/76078514-9f03-49b4-a27b-5ea43eec3e93-kube-api-access-dfr6p\") pod \"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.271126 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76078514-9f03-49b4-a27b-5ea43eec3e93-config-volume\") pod \"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.271380 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76078514-9f03-49b4-a27b-5ea43eec3e93-secret-volume\") pod \"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.373955 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76078514-9f03-49b4-a27b-5ea43eec3e93-config-volume\") pod \"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.374452 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76078514-9f03-49b4-a27b-5ea43eec3e93-secret-volume\") pod \"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.374918 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfr6p\" (UniqueName: \"kubernetes.io/projected/76078514-9f03-49b4-a27b-5ea43eec3e93-kube-api-access-dfr6p\") pod \"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.377446 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76078514-9f03-49b4-a27b-5ea43eec3e93-config-volume\") pod 
\"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.384215 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76078514-9f03-49b4-a27b-5ea43eec3e93-secret-volume\") pod \"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.406985 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfr6p\" (UniqueName: \"kubernetes.io/projected/76078514-9f03-49b4-a27b-5ea43eec3e93-kube-api-access-dfr6p\") pod \"collect-profiles-29401905-j2zvq\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.514986 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.541216 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:45:00 crc kubenswrapper[5045]: I1125 23:45:00.541302 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:45:01 crc kubenswrapper[5045]: I1125 23:45:01.008449 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq"] Nov 25 23:45:01 crc kubenswrapper[5045]: I1125 23:45:01.574737 5045 generic.go:334] "Generic (PLEG): container finished" podID="76078514-9f03-49b4-a27b-5ea43eec3e93" containerID="3f3b833979481c6544d4ef002fb16de2d567dfcd4fd8bbd5dcdd93ea3534ba42" exitCode=0 Nov 25 23:45:01 crc kubenswrapper[5045]: I1125 23:45:01.574964 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" event={"ID":"76078514-9f03-49b4-a27b-5ea43eec3e93","Type":"ContainerDied","Data":"3f3b833979481c6544d4ef002fb16de2d567dfcd4fd8bbd5dcdd93ea3534ba42"} Nov 25 23:45:01 crc kubenswrapper[5045]: I1125 23:45:01.574987 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" event={"ID":"76078514-9f03-49b4-a27b-5ea43eec3e93","Type":"ContainerStarted","Data":"8d59acb0872c3a665924b82f20cccf5beef74017ebcf16afe7651cab5c21d599"} Nov 25 23:45:02 crc kubenswrapper[5045]: I1125 23:45:02.988540 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.146925 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfr6p\" (UniqueName: \"kubernetes.io/projected/76078514-9f03-49b4-a27b-5ea43eec3e93-kube-api-access-dfr6p\") pod \"76078514-9f03-49b4-a27b-5ea43eec3e93\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.147586 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76078514-9f03-49b4-a27b-5ea43eec3e93-config-volume\") pod \"76078514-9f03-49b4-a27b-5ea43eec3e93\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.148174 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76078514-9f03-49b4-a27b-5ea43eec3e93-secret-volume\") pod \"76078514-9f03-49b4-a27b-5ea43eec3e93\" (UID: \"76078514-9f03-49b4-a27b-5ea43eec3e93\") " Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.148491 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76078514-9f03-49b4-a27b-5ea43eec3e93-config-volume" (OuterVolumeSpecName: "config-volume") pod "76078514-9f03-49b4-a27b-5ea43eec3e93" (UID: "76078514-9f03-49b4-a27b-5ea43eec3e93"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.148913 5045 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76078514-9f03-49b4-a27b-5ea43eec3e93-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.152328 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76078514-9f03-49b4-a27b-5ea43eec3e93-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "76078514-9f03-49b4-a27b-5ea43eec3e93" (UID: "76078514-9f03-49b4-a27b-5ea43eec3e93"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.155916 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76078514-9f03-49b4-a27b-5ea43eec3e93-kube-api-access-dfr6p" (OuterVolumeSpecName: "kube-api-access-dfr6p") pod "76078514-9f03-49b4-a27b-5ea43eec3e93" (UID: "76078514-9f03-49b4-a27b-5ea43eec3e93"). InnerVolumeSpecName "kube-api-access-dfr6p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.250774 5045 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76078514-9f03-49b4-a27b-5ea43eec3e93-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.251057 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfr6p\" (UniqueName: \"kubernetes.io/projected/76078514-9f03-49b4-a27b-5ea43eec3e93-kube-api-access-dfr6p\") on node \"crc\" DevicePath \"\"" Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.596161 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" event={"ID":"76078514-9f03-49b4-a27b-5ea43eec3e93","Type":"ContainerDied","Data":"8d59acb0872c3a665924b82f20cccf5beef74017ebcf16afe7651cab5c21d599"} Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.596236 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d59acb0872c3a665924b82f20cccf5beef74017ebcf16afe7651cab5c21d599" Nov 25 23:45:03 crc kubenswrapper[5045]: I1125 23:45:03.596311 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401905-j2zvq" Nov 25 23:45:04 crc kubenswrapper[5045]: I1125 23:45:04.091607 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4"] Nov 25 23:45:04 crc kubenswrapper[5045]: I1125 23:45:04.100186 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401860-j7gf4"] Nov 25 23:45:04 crc kubenswrapper[5045]: I1125 23:45:04.439247 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e466426d-ad8f-46ce-813b-b0276253e555" path="/var/lib/kubelet/pods/e466426d-ad8f-46ce-813b-b0276253e555/volumes" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.398124 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-chwv9"] Nov 25 23:45:09 crc kubenswrapper[5045]: E1125 23:45:09.399571 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76078514-9f03-49b4-a27b-5ea43eec3e93" containerName="collect-profiles" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.399645 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="76078514-9f03-49b4-a27b-5ea43eec3e93" containerName="collect-profiles" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.399901 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="76078514-9f03-49b4-a27b-5ea43eec3e93" containerName="collect-profiles" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.401112 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.408879 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-chwv9"] Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.501786 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-catalog-content\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.501840 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5lb4\" (UniqueName: \"kubernetes.io/projected/77ceeb6d-f01b-4718-bc17-a086967b2033-kube-api-access-g5lb4\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.501941 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-utilities\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.604958 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-utilities\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.605536 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-utilities\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.605659 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-catalog-content\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.605753 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5lb4\" (UniqueName: \"kubernetes.io/projected/77ceeb6d-f01b-4718-bc17-a086967b2033-kube-api-access-g5lb4\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.606485 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-catalog-content\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.623982 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-g5lb4\" (UniqueName: \"kubernetes.io/projected/77ceeb6d-f01b-4718-bc17-a086967b2033-kube-api-access-g5lb4\") pod \"redhat-operators-chwv9\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:09 crc kubenswrapper[5045]: I1125 23:45:09.725170 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:10 crc kubenswrapper[5045]: I1125 23:45:10.186774 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-chwv9"] Nov 25 23:45:10 crc kubenswrapper[5045]: I1125 23:45:10.674288 5045 generic.go:334] "Generic (PLEG): container finished" podID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerID="0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572" exitCode=0 Nov 25 23:45:10 crc kubenswrapper[5045]: I1125 23:45:10.674344 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chwv9" event={"ID":"77ceeb6d-f01b-4718-bc17-a086967b2033","Type":"ContainerDied","Data":"0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572"} Nov 25 23:45:10 crc kubenswrapper[5045]: I1125 23:45:10.674523 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chwv9" event={"ID":"77ceeb6d-f01b-4718-bc17-a086967b2033","Type":"ContainerStarted","Data":"4159fdbfecb82841b07787a3339301ac70ed683e3676b308379ff3909a399ee8"} Nov 25 23:45:11 crc kubenswrapper[5045]: I1125 23:45:11.687106 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chwv9" event={"ID":"77ceeb6d-f01b-4718-bc17-a086967b2033","Type":"ContainerStarted","Data":"9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9"} Nov 25 23:45:13 crc kubenswrapper[5045]: I1125 23:45:13.715140 5045 generic.go:334] "Generic (PLEG): container finished" podID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerID="9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9" exitCode=0 Nov 25 23:45:13 crc kubenswrapper[5045]: I1125 23:45:13.715245 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chwv9" event={"ID":"77ceeb6d-f01b-4718-bc17-a086967b2033","Type":"ContainerDied","Data":"9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9"} Nov 25 23:45:15 crc kubenswrapper[5045]: I1125 23:45:15.734588 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chwv9" event={"ID":"77ceeb6d-f01b-4718-bc17-a086967b2033","Type":"ContainerStarted","Data":"f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece"} Nov 25 23:45:15 crc kubenswrapper[5045]: I1125 23:45:15.763833 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-chwv9" podStartSLOduration=2.687685886 podStartE2EDuration="6.763809412s" podCreationTimestamp="2025-11-25 23:45:09 +0000 UTC" firstStartedPulling="2025-11-25 23:45:10.676032676 +0000 UTC m=+2767.033691788" lastFinishedPulling="2025-11-25 23:45:14.752156192 +0000 UTC m=+2771.109815314" observedRunningTime="2025-11-25 23:45:15.763351618 +0000 UTC m=+2772.121010760" watchObservedRunningTime="2025-11-25 23:45:15.763809412 +0000 UTC m=+2772.121468534" Nov 25 23:45:19 crc kubenswrapper[5045]: I1125 23:45:19.725440 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 
23:45:19 crc kubenswrapper[5045]: I1125 23:45:19.726132 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:20 crc kubenswrapper[5045]: I1125 23:45:20.793048 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-chwv9" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="registry-server" probeResult="failure" output=< Nov 25 23:45:20 crc kubenswrapper[5045]: timeout: failed to connect service ":50051" within 1s Nov 25 23:45:20 crc kubenswrapper[5045]: > Nov 25 23:45:21 crc kubenswrapper[5045]: I1125 23:45:21.466489 5045 scope.go:117] "RemoveContainer" containerID="07f8e9dad9f2efcf3691f68fdf377d08d3cd124f7b2240bd0d98fd897007936e" Nov 25 23:45:29 crc kubenswrapper[5045]: I1125 23:45:29.772650 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:29 crc kubenswrapper[5045]: I1125 23:45:29.822530 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:30 crc kubenswrapper[5045]: I1125 23:45:30.540684 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:45:30 crc kubenswrapper[5045]: I1125 23:45:30.541020 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:45:30 crc kubenswrapper[5045]: I1125 23:45:30.541074 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:45:30 crc kubenswrapper[5045]: I1125 23:45:30.541880 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f237bb0a3155d2bd6d4d6edb5b75e2e10e60f5f39cb1458cea502418fd177a1d"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:45:30 crc kubenswrapper[5045]: I1125 23:45:30.541938 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://f237bb0a3155d2bd6d4d6edb5b75e2e10e60f5f39cb1458cea502418fd177a1d" gracePeriod=600 Nov 25 23:45:30 crc kubenswrapper[5045]: I1125 23:45:30.888158 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="f237bb0a3155d2bd6d4d6edb5b75e2e10e60f5f39cb1458cea502418fd177a1d" exitCode=0 Nov 25 23:45:30 crc kubenswrapper[5045]: I1125 23:45:30.888213 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"f237bb0a3155d2bd6d4d6edb5b75e2e10e60f5f39cb1458cea502418fd177a1d"} Nov 25 23:45:30 crc 
kubenswrapper[5045]: I1125 23:45:30.888507 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a"} Nov 25 23:45:30 crc kubenswrapper[5045]: I1125 23:45:30.888527 5045 scope.go:117] "RemoveContainer" containerID="46a0bc24837efe31b64a738f19ce028273debb5c903fce13a19e55331ab599f5" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.155094 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-chwv9"] Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.155320 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-chwv9" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="registry-server" containerID="cri-o://f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece" gracePeriod=2 Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.654918 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.753379 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-catalog-content\") pod \"77ceeb6d-f01b-4718-bc17-a086967b2033\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.753493 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5lb4\" (UniqueName: \"kubernetes.io/projected/77ceeb6d-f01b-4718-bc17-a086967b2033-kube-api-access-g5lb4\") pod \"77ceeb6d-f01b-4718-bc17-a086967b2033\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.753541 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-utilities\") pod \"77ceeb6d-f01b-4718-bc17-a086967b2033\" (UID: \"77ceeb6d-f01b-4718-bc17-a086967b2033\") " Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.758044 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-utilities" (OuterVolumeSpecName: "utilities") pod "77ceeb6d-f01b-4718-bc17-a086967b2033" (UID: "77ceeb6d-f01b-4718-bc17-a086967b2033"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.766205 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77ceeb6d-f01b-4718-bc17-a086967b2033-kube-api-access-g5lb4" (OuterVolumeSpecName: "kube-api-access-g5lb4") pod "77ceeb6d-f01b-4718-bc17-a086967b2033" (UID: "77ceeb6d-f01b-4718-bc17-a086967b2033"). InnerVolumeSpecName "kube-api-access-g5lb4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.855627 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5lb4\" (UniqueName: \"kubernetes.io/projected/77ceeb6d-f01b-4718-bc17-a086967b2033-kube-api-access-g5lb4\") on node \"crc\" DevicePath \"\"" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.855661 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.863804 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77ceeb6d-f01b-4718-bc17-a086967b2033" (UID: "77ceeb6d-f01b-4718-bc17-a086967b2033"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.902068 5045 generic.go:334] "Generic (PLEG): container finished" podID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerID="f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece" exitCode=0 Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.902431 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-chwv9" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.902440 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chwv9" event={"ID":"77ceeb6d-f01b-4718-bc17-a086967b2033","Type":"ContainerDied","Data":"f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece"} Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.903095 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chwv9" event={"ID":"77ceeb6d-f01b-4718-bc17-a086967b2033","Type":"ContainerDied","Data":"4159fdbfecb82841b07787a3339301ac70ed683e3676b308379ff3909a399ee8"} Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.903121 5045 scope.go:117] "RemoveContainer" containerID="f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.947584 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-chwv9"] Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.952659 5045 scope.go:117] "RemoveContainer" containerID="9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.957316 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-chwv9"] Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.957379 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ceeb6d-f01b-4718-bc17-a086967b2033-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:45:31 crc kubenswrapper[5045]: I1125 23:45:31.980304 5045 scope.go:117] "RemoveContainer" containerID="0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572" Nov 25 23:45:32 crc kubenswrapper[5045]: I1125 23:45:32.029660 5045 scope.go:117] "RemoveContainer" containerID="f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece" Nov 25 23:45:32 crc kubenswrapper[5045]: E1125 23:45:32.030222 5045 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece\": container with ID starting with f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece not found: ID does not exist" containerID="f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece" Nov 25 23:45:32 crc kubenswrapper[5045]: I1125 23:45:32.030267 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece"} err="failed to get container status \"f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece\": rpc error: code = NotFound desc = could not find container \"f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece\": container with ID starting with f497ece6e3afcc13ad35620e89d58c44c5ffad549858c2c44a8c36596cdc6ece not found: ID does not exist" Nov 25 23:45:32 crc kubenswrapper[5045]: I1125 23:45:32.030295 5045 scope.go:117] "RemoveContainer" containerID="9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9" Nov 25 23:45:32 crc kubenswrapper[5045]: E1125 23:45:32.030799 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9\": container with ID starting with 9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9 not found: ID does not exist" containerID="9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9" Nov 25 23:45:32 crc kubenswrapper[5045]: I1125 23:45:32.030949 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9"} err="failed to get container status \"9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9\": rpc error: code = NotFound desc = could not find container \"9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9\": container with ID starting with 9811a9d65f8be3737257d059df5521c9e61bc6ffa8f2ce81d363a1e651e450e9 not found: ID does not exist" Nov 25 23:45:32 crc kubenswrapper[5045]: I1125 23:45:32.031102 5045 scope.go:117] "RemoveContainer" containerID="0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572" Nov 25 23:45:32 crc kubenswrapper[5045]: E1125 23:45:32.031443 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572\": container with ID starting with 0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572 not found: ID does not exist" containerID="0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572" Nov 25 23:45:32 crc kubenswrapper[5045]: I1125 23:45:32.031474 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572"} err="failed to get container status \"0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572\": rpc error: code = NotFound desc = could not find container \"0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572\": container with ID starting with 0794fede2893f68487ea1cbafef5a5f91f5d7ecf8dee5ab5b04b57d27d2ef572 not found: ID does not exist" Nov 25 23:45:32 crc kubenswrapper[5045]: I1125 23:45:32.412380 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" path="/var/lib/kubelet/pods/77ceeb6d-f01b-4718-bc17-a086967b2033/volumes" Nov 25 23:47:30 crc kubenswrapper[5045]: I1125 23:47:30.541139 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:47:30 crc kubenswrapper[5045]: I1125 23:47:30.541702 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:47:35 crc kubenswrapper[5045]: I1125 23:47:35.251000 5045 generic.go:334] "Generic (PLEG): container finished" podID="886ae822-5a4e-4578-a137-1322687c1a77" containerID="ad3bea810d2c4ee7bfe72254bce7da94b4981f4f71f26f21682800d45f99ea08" exitCode=0 Nov 25 23:47:35 crc kubenswrapper[5045]: I1125 23:47:35.251068 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" event={"ID":"886ae822-5a4e-4578-a137-1322687c1a77","Type":"ContainerDied","Data":"ad3bea810d2c4ee7bfe72254bce7da94b4981f4f71f26f21682800d45f99ea08"} Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.725044 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.825655 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ceph\") pod \"886ae822-5a4e-4578-a137-1322687c1a77\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.825987 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-combined-ca-bundle\") pod \"886ae822-5a4e-4578-a137-1322687c1a77\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.826171 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-secret-0\") pod \"886ae822-5a4e-4578-a137-1322687c1a77\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.826262 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72p7h\" (UniqueName: \"kubernetes.io/projected/886ae822-5a4e-4578-a137-1322687c1a77-kube-api-access-72p7h\") pod \"886ae822-5a4e-4578-a137-1322687c1a77\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.826300 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-inventory\") pod \"886ae822-5a4e-4578-a137-1322687c1a77\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.826359 5045 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ssh-key\") pod \"886ae822-5a4e-4578-a137-1322687c1a77\" (UID: \"886ae822-5a4e-4578-a137-1322687c1a77\") " Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.834008 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/886ae822-5a4e-4578-a137-1322687c1a77-kube-api-access-72p7h" (OuterVolumeSpecName: "kube-api-access-72p7h") pod "886ae822-5a4e-4578-a137-1322687c1a77" (UID: "886ae822-5a4e-4578-a137-1322687c1a77"). InnerVolumeSpecName "kube-api-access-72p7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.834240 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ceph" (OuterVolumeSpecName: "ceph") pod "886ae822-5a4e-4578-a137-1322687c1a77" (UID: "886ae822-5a4e-4578-a137-1322687c1a77"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.836355 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "886ae822-5a4e-4578-a137-1322687c1a77" (UID: "886ae822-5a4e-4578-a137-1322687c1a77"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.859538 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "886ae822-5a4e-4578-a137-1322687c1a77" (UID: "886ae822-5a4e-4578-a137-1322687c1a77"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.868418 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-inventory" (OuterVolumeSpecName: "inventory") pod "886ae822-5a4e-4578-a137-1322687c1a77" (UID: "886ae822-5a4e-4578-a137-1322687c1a77"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.879365 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "886ae822-5a4e-4578-a137-1322687c1a77" (UID: "886ae822-5a4e-4578-a137-1322687c1a77"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.928806 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.928854 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.928876 5045 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.928896 5045 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.928914 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72p7h\" (UniqueName: \"kubernetes.io/projected/886ae822-5a4e-4578-a137-1322687c1a77-kube-api-access-72p7h\") on node \"crc\" DevicePath \"\"" Nov 25 23:47:36 crc kubenswrapper[5045]: I1125 23:47:36.928931 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/886ae822-5a4e-4578-a137-1322687c1a77-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.283745 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" event={"ID":"886ae822-5a4e-4578-a137-1322687c1a77","Type":"ContainerDied","Data":"5c67dc67831aff5b3a57274e6e6fad38b01d832b581acdcb72d18b7d6f314458"} Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.283831 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c67dc67831aff5b3a57274e6e6fad38b01d832b581acdcb72d18b7d6f314458" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.284257 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.412155 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk"] Nov 25 23:47:37 crc kubenswrapper[5045]: E1125 23:47:37.412567 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="registry-server" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.412586 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="registry-server" Nov 25 23:47:37 crc kubenswrapper[5045]: E1125 23:47:37.412614 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="886ae822-5a4e-4578-a137-1322687c1a77" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.412624 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="886ae822-5a4e-4578-a137-1322687c1a77" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 23:47:37 crc kubenswrapper[5045]: E1125 23:47:37.412646 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="extract-utilities" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.412654 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="extract-utilities" Nov 25 23:47:37 crc kubenswrapper[5045]: E1125 23:47:37.412666 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="extract-content" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.412675 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="extract-content" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.412957 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="886ae822-5a4e-4578-a137-1322687c1a77" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.412985 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="77ceeb6d-f01b-4718-bc17-a086967b2033" containerName="registry-server" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.413731 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.417231 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.417461 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.417613 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n77wq" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.417923 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.418093 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.418309 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.418467 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.418786 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.424941 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.433807 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk"] Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.456418 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.456675 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.456894 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.456983 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ssh-key\") pod 
\"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.457081 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.457170 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.457295 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.457369 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw5lz\" (UniqueName: \"kubernetes.io/projected/2a2c3598-f1cb-4b09-8410-48442361a88a-kube-api-access-cw5lz\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.457456 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.457527 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.457603 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc 
kubenswrapper[5045]: I1125 23:47:37.559488 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.559590 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.559698 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.559768 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw5lz\" (UniqueName: \"kubernetes.io/projected/2a2c3598-f1cb-4b09-8410-48442361a88a-kube-api-access-cw5lz\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.559819 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.559858 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.560752 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.560859 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: 
\"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.560933 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.560992 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.561026 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.561172 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.561828 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.563940 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.564559 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.564686 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: 
\"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.567029 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.567706 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.568581 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.568839 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.571836 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.580153 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw5lz\" (UniqueName: \"kubernetes.io/projected/2a2c3598-f1cb-4b09-8410-48442361a88a-kube-api-access-cw5lz\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:37 crc kubenswrapper[5045]: I1125 23:47:37.738702 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:47:38 crc kubenswrapper[5045]: I1125 23:47:38.318587 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk"] Nov 25 23:47:39 crc kubenswrapper[5045]: I1125 23:47:39.309257 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" event={"ID":"2a2c3598-f1cb-4b09-8410-48442361a88a","Type":"ContainerStarted","Data":"eb674031752ed657ec760732428c0ac6bc91cb55b385971e37d661abdb2bd5d9"} Nov 25 23:47:39 crc kubenswrapper[5045]: I1125 23:47:39.310258 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" event={"ID":"2a2c3598-f1cb-4b09-8410-48442361a88a","Type":"ContainerStarted","Data":"4ea2bc0aff6f4917a1a76a79569ea3f94b8b87f7d1d05bcf83b104561b5d0434"} Nov 25 23:47:39 crc kubenswrapper[5045]: I1125 23:47:39.347218 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" podStartSLOduration=1.6903603980000002 podStartE2EDuration="2.347197889s" podCreationTimestamp="2025-11-25 23:47:37 +0000 UTC" firstStartedPulling="2025-11-25 23:47:38.329404859 +0000 UTC m=+2914.687064011" lastFinishedPulling="2025-11-25 23:47:38.98624239 +0000 UTC m=+2915.343901502" observedRunningTime="2025-11-25 23:47:39.343188143 +0000 UTC m=+2915.700847285" watchObservedRunningTime="2025-11-25 23:47:39.347197889 +0000 UTC m=+2915.704857011" Nov 25 23:48:00 crc kubenswrapper[5045]: I1125 23:48:00.541612 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:48:00 crc kubenswrapper[5045]: I1125 23:48:00.542812 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:48:30 crc kubenswrapper[5045]: I1125 23:48:30.541215 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:48:30 crc kubenswrapper[5045]: I1125 23:48:30.542169 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:48:30 crc kubenswrapper[5045]: I1125 23:48:30.542238 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:48:30 crc kubenswrapper[5045]: I1125 23:48:30.543234 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:48:30 crc kubenswrapper[5045]: I1125 23:48:30.543326 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" gracePeriod=600 Nov 25 23:48:30 crc kubenswrapper[5045]: E1125 23:48:30.669701 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:48:31 crc kubenswrapper[5045]: I1125 23:48:31.029654 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" exitCode=0 Nov 25 23:48:31 crc kubenswrapper[5045]: I1125 23:48:31.029786 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a"} Nov 25 23:48:31 crc kubenswrapper[5045]: I1125 23:48:31.029862 5045 scope.go:117] "RemoveContainer" containerID="f237bb0a3155d2bd6d4d6edb5b75e2e10e60f5f39cb1458cea502418fd177a1d" Nov 25 23:48:31 crc kubenswrapper[5045]: I1125 23:48:31.030912 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:48:31 crc kubenswrapper[5045]: E1125 23:48:31.031644 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:48:46 crc kubenswrapper[5045]: I1125 23:48:46.397457 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:48:46 crc kubenswrapper[5045]: E1125 23:48:46.398556 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:48:57 crc kubenswrapper[5045]: I1125 23:48:57.397545 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:48:57 crc kubenswrapper[5045]: E1125 23:48:57.398798 5045 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:49:12 crc kubenswrapper[5045]: I1125 23:49:12.397755 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:49:12 crc kubenswrapper[5045]: E1125 23:49:12.399038 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:49:26 crc kubenswrapper[5045]: I1125 23:49:26.397658 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:49:26 crc kubenswrapper[5045]: E1125 23:49:26.398863 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:49:37 crc kubenswrapper[5045]: I1125 23:49:37.397213 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:49:37 crc kubenswrapper[5045]: E1125 23:49:37.398480 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:49:50 crc kubenswrapper[5045]: I1125 23:49:50.397033 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:49:50 crc kubenswrapper[5045]: E1125 23:49:50.397966 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:50:04 crc kubenswrapper[5045]: I1125 23:50:04.408425 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:50:04 crc kubenswrapper[5045]: E1125 23:50:04.409458 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:50:18 crc kubenswrapper[5045]: I1125 23:50:18.397061 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:50:18 crc kubenswrapper[5045]: E1125 23:50:18.398352 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:50:32 crc kubenswrapper[5045]: I1125 23:50:32.397549 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:50:32 crc kubenswrapper[5045]: E1125 23:50:32.399601 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:50:46 crc kubenswrapper[5045]: I1125 23:50:46.397357 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:50:46 crc kubenswrapper[5045]: E1125 23:50:46.398801 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:50:57 crc kubenswrapper[5045]: I1125 23:50:57.396786 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:50:57 crc kubenswrapper[5045]: E1125 23:50:57.398065 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:51:02 crc kubenswrapper[5045]: I1125 23:51:02.799372 5045 generic.go:334] "Generic (PLEG): container finished" podID="2a2c3598-f1cb-4b09-8410-48442361a88a" containerID="eb674031752ed657ec760732428c0ac6bc91cb55b385971e37d661abdb2bd5d9" exitCode=0 Nov 25 23:51:02 crc kubenswrapper[5045]: I1125 23:51:02.799398 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" event={"ID":"2a2c3598-f1cb-4b09-8410-48442361a88a","Type":"ContainerDied","Data":"eb674031752ed657ec760732428c0ac6bc91cb55b385971e37d661abdb2bd5d9"} Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 
23:51:04.250676 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425407 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-extra-config-0\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425459 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-1\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425476 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-1\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425577 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw5lz\" (UniqueName: \"kubernetes.io/projected/2a2c3598-f1cb-4b09-8410-48442361a88a-kube-api-access-cw5lz\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425657 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425744 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-0\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425776 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-custom-ceph-combined-ca-bundle\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425827 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-0\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425862 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-inventory\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425929 5045 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ssh-key\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.425961 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph-nova-0\") pod \"2a2c3598-f1cb-4b09-8410-48442361a88a\" (UID: \"2a2c3598-f1cb-4b09-8410-48442361a88a\") " Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.432247 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph" (OuterVolumeSpecName: "ceph") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.432455 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a2c3598-f1cb-4b09-8410-48442361a88a-kube-api-access-cw5lz" (OuterVolumeSpecName: "kube-api-access-cw5lz") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "kube-api-access-cw5lz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.447280 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.449350 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.458026 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "ceph-nova-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.458415 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.460443 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.463261 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.464362 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.481436 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.483932 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-inventory" (OuterVolumeSpecName: "inventory") pod "2a2c3598-f1cb-4b09-8410-48442361a88a" (UID: "2a2c3598-f1cb-4b09-8410-48442361a88a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528680 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528730 5045 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528744 5045 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528760 5045 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528772 5045 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528782 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw5lz\" (UniqueName: \"kubernetes.io/projected/2a2c3598-f1cb-4b09-8410-48442361a88a-kube-api-access-cw5lz\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528793 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528803 5045 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528813 5045 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528823 5045 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.528834 5045 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a2c3598-f1cb-4b09-8410-48442361a88a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.822467 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" event={"ID":"2a2c3598-f1cb-4b09-8410-48442361a88a","Type":"ContainerDied","Data":"4ea2bc0aff6f4917a1a76a79569ea3f94b8b87f7d1d05bcf83b104561b5d0434"} Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.822752 5045 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="4ea2bc0aff6f4917a1a76a79569ea3f94b8b87f7d1d05bcf83b104561b5d0434" Nov 25 23:51:04 crc kubenswrapper[5045]: I1125 23:51:04.822586 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk" Nov 25 23:51:08 crc kubenswrapper[5045]: I1125 23:51:08.397002 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:51:08 crc kubenswrapper[5045]: E1125 23:51:08.398600 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:51:18 crc kubenswrapper[5045]: I1125 23:51:18.950677 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 23:51:18 crc kubenswrapper[5045]: E1125 23:51:18.952867 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a2c3598-f1cb-4b09-8410-48442361a88a" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 23:51:18 crc kubenswrapper[5045]: I1125 23:51:18.952990 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a2c3598-f1cb-4b09-8410-48442361a88a" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 23:51:18 crc kubenswrapper[5045]: I1125 23:51:18.953286 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a2c3598-f1cb-4b09-8410-48442361a88a" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 23:51:18 crc kubenswrapper[5045]: I1125 23:51:18.954495 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:18 crc kubenswrapper[5045]: I1125 23:51:18.961208 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 23:51:18 crc kubenswrapper[5045]: I1125 23:51:18.961302 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 25 23:51:18 crc kubenswrapper[5045]: I1125 23:51:18.965001 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.026106 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.027744 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.029892 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.039180 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050518 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050578 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-run\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050611 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050631 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050649 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmvbx\" (UniqueName: \"kubernetes.io/projected/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-kube-api-access-bmvbx\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050667 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050820 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050871 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 
23:51:19.050896 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050935 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.050983 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-config-data\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051026 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051092 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d86361ad-5146-475b-b0d4-c505b002904b-ceph\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051136 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051191 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051239 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-dev\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051260 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051297 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"sys\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-sys\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051331 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051351 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051370 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-lib-modules\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051388 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051407 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-run\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051427 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051463 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051479 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n97n4\" (UniqueName: \"kubernetes.io/projected/d86361ad-5146-475b-b0d4-c505b002904b-kube-api-access-n97n4\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051530 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: 
\"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051585 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-dev\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051605 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-sys\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051629 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-scripts\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051678 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.051693 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.152876 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-run\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.152923 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.152947 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.152966 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmvbx\" (UniqueName: \"kubernetes.io/projected/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-kube-api-access-bmvbx\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 
23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.152984 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.152987 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-run\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153064 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153012 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153130 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153152 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153161 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153190 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153219 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153226 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-config-data\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153269 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153320 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153339 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d86361ad-5146-475b-b0d4-c505b002904b-ceph\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153356 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153364 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153381 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153389 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153510 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153554 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-dev\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153571 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153612 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-sys\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153650 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153670 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153689 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-lib-modules\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153725 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153747 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-run\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153769 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153789 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153811 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n97n4\" (UniqueName: \"kubernetes.io/projected/d86361ad-5146-475b-b0d4-c505b002904b-kube-api-access-n97n4\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153870 5045 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153926 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-dev\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153946 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-sys\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.153976 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-scripts\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154033 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154048 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154067 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154173 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-run\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154272 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154298 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-dev\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 
23:51:19.154329 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154349 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-sys\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154436 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.154947 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-dev\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.155008 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.156399 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-lib-modules\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.157073 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.157137 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d86361ad-5146-475b-b0d4-c505b002904b-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.157183 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-sys\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.160135 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.160173 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.160138 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-scripts\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.160556 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d86361ad-5146-475b-b0d4-c505b002904b-ceph\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.160865 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.161188 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.161157 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.163483 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.164553 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d86361ad-5146-475b-b0d4-c505b002904b-config-data\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.169933 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmvbx\" (UniqueName: \"kubernetes.io/projected/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-kube-api-access-bmvbx\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.171181 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dcd1a1a-085e-472a-8dab-788bba3c3ce4-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"3dcd1a1a-085e-472a-8dab-788bba3c3ce4\") " pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc 
kubenswrapper[5045]: I1125 23:51:19.171475 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n97n4\" (UniqueName: \"kubernetes.io/projected/d86361ad-5146-475b-b0d4-c505b002904b-kube-api-access-n97n4\") pod \"cinder-backup-0\" (UID: \"d86361ad-5146-475b-b0d4-c505b002904b\") " pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.271388 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.347853 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.807794 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-2t5tj"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.809457 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.832038 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-2t5tj"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.841901 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-79677cc74f-49kdf"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.843335 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.845638 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.848431 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.848785 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-r5t6s" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.852085 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.852940 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.853508 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.855012 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.855311 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.855421 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.855576 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-pjcxf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.868249 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cml67\" (UniqueName: \"kubernetes.io/projected/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-kube-api-access-cml67\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.868304 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgjvx\" (UniqueName: \"kubernetes.io/projected/c091d493-4263-41da-a276-6dc859d7d5e1-kube-api-access-sgjvx\") pod \"manila-db-create-2t5tj\" (UID: \"c091d493-4263-41da-a276-6dc859d7d5e1\") " pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.868341 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c091d493-4263-41da-a276-6dc859d7d5e1-operator-scripts\") pod \"manila-db-create-2t5tj\" (UID: \"c091d493-4263-41da-a276-6dc859d7d5e1\") " pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.868384 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-horizon-secret-key\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.868409 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-scripts\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.868446 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-config-data\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.868469 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-logs\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 
23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.894300 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.911869 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79677cc74f-49kdf"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.940306 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-2c9f-account-create-update-zpp6n"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.941558 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.943329 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.958319 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:19 crc kubenswrapper[5045]: E1125 23:51:19.959140 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run kube-api-access-g96gg logs public-tls-certs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-external-api-0" podUID="e3edb445-2398-4754-b292-a97fa8d32876" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.969910 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.969990 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-horizon-secret-key\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970023 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-scripts\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970067 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-config-data\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970086 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970108 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-logs\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970127 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970153 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g96gg\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-kube-api-access-g96gg\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970187 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970206 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-logs\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970221 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-config-data\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970239 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cml67\" (UniqueName: \"kubernetes.io/projected/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-kube-api-access-cml67\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970253 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-scripts\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970281 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-ceph\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970302 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgjvx\" 
(UniqueName: \"kubernetes.io/projected/c091d493-4263-41da-a276-6dc859d7d5e1-kube-api-access-sgjvx\") pod \"manila-db-create-2t5tj\" (UID: \"c091d493-4263-41da-a276-6dc859d7d5e1\") " pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.970341 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c091d493-4263-41da-a276-6dc859d7d5e1-operator-scripts\") pod \"manila-db-create-2t5tj\" (UID: \"c091d493-4263-41da-a276-6dc859d7d5e1\") " pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.971011 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c091d493-4263-41da-a276-6dc859d7d5e1-operator-scripts\") pod \"manila-db-create-2t5tj\" (UID: \"c091d493-4263-41da-a276-6dc859d7d5e1\") " pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.974194 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-2c9f-account-create-update-zpp6n"] Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.975451 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-scripts\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:19 crc kubenswrapper[5045]: I1125 23:51:19.990860 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-horizon-secret-key\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:19.995302 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-logs\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:19.995554 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.007754 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-config-data\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.010039 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgjvx\" (UniqueName: \"kubernetes.io/projected/c091d493-4263-41da-a276-6dc859d7d5e1-kube-api-access-sgjvx\") pod \"manila-db-create-2t5tj\" (UID: \"c091d493-4263-41da-a276-6dc859d7d5e1\") " pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.010916 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cml67\" (UniqueName: \"kubernetes.io/projected/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-kube-api-access-cml67\") pod \"horizon-79677cc74f-49kdf\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:20 
crc kubenswrapper[5045]: I1125 23:51:20.026103 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.054294 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.066587 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"d86361ad-5146-475b-b0d4-c505b002904b","Type":"ContainerStarted","Data":"8402ab20ac1f7dfbbd1864b3973aca6eb9f6f6a42b9775a487255f5bad719691"} Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.066706 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.069991 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.075526 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.075776 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.076557 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078524 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/016700b4-9818-4d96-be4d-d6b07316b91f-operator-scripts\") pod \"manila-2c9f-account-create-update-zpp6n\" (UID: \"016700b4-9818-4d96-be4d-d6b07316b91f\") " pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078596 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfccd\" (UniqueName: \"kubernetes.io/projected/016700b4-9818-4d96-be4d-d6b07316b91f-kube-api-access-wfccd\") pod \"manila-2c9f-account-create-update-zpp6n\" (UID: \"016700b4-9818-4d96-be4d-d6b07316b91f\") " pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078641 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078684 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078709 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g96gg\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-kube-api-access-g96gg\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " 
pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078761 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078780 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-logs\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078798 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-config-data\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078832 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-scripts\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078865 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-ceph\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.078931 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.079594 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-logs\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.080088 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.082190 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.082524 5045 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-ceph\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.082579 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-config-data\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.082707 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.083814 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.085157 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-scripts\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.101062 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g96gg\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-kube-api-access-g96gg\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.115369 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.123175 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.123334 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: E1125 23:51:20.123666 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run internal-tls-certs kube-api-access-h8rmc logs scripts], unattached volumes=[], failed to process volumes=[ceph combined-ca-bundle config-data glance httpd-run internal-tls-certs kube-api-access-h8rmc logs scripts]: context canceled" pod="openstack/glance-default-internal-api-0" podUID="a451c310-a3d3-46fc-8015-0792f52d8840" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.131723 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-d8b56c587-2p4tv"] Nov 25 23:51:20 crc 
kubenswrapper[5045]: I1125 23:51:20.134483 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.139076 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-d8b56c587-2p4tv"] Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.145021 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.179675 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-scripts\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.179767 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.179876 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-ceph\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.179974 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-public-tls-certs\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180072 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-combined-ca-bundle\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180114 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-logs\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180150 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-httpd-run\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180231 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-config-data\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: \"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180279 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g96gg\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-kube-api-access-g96gg\") pod \"e3edb445-2398-4754-b292-a97fa8d32876\" (UID: 
\"e3edb445-2398-4754-b292-a97fa8d32876\") " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180620 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-ceph\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180702 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-logs\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180824 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8rmc\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-kube-api-access-h8rmc\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180878 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/016700b4-9818-4d96-be4d-d6b07316b91f-operator-scripts\") pod \"manila-2c9f-account-create-update-zpp6n\" (UID: \"016700b4-9818-4d96-be4d-d6b07316b91f\") " pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180903 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180940 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180969 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.180993 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfccd\" (UniqueName: \"kubernetes.io/projected/016700b4-9818-4d96-be4d-d6b07316b91f-kube-api-access-wfccd\") pod \"manila-2c9f-account-create-update-zpp6n\" (UID: \"016700b4-9818-4d96-be4d-d6b07316b91f\") " pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.181019 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.181057 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-horizon-secret-key\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.181082 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-scripts\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.181144 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-logs\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.181168 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-config-data\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.181194 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.181219 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rspmb\" (UniqueName: \"kubernetes.io/projected/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-kube-api-access-rspmb\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.181248 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.182086 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.182425 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/016700b4-9818-4d96-be4d-d6b07316b91f-operator-scripts\") pod \"manila-2c9f-account-create-update-zpp6n\" (UID: \"016700b4-9818-4d96-be4d-d6b07316b91f\") " pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.182472 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-logs" (OuterVolumeSpecName: "logs") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.183665 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-ceph" (OuterVolumeSpecName: "ceph") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.183947 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.185823 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-scripts" (OuterVolumeSpecName: "scripts") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.186495 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.186862 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-config-data" (OuterVolumeSpecName: "config-data") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.187801 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-kube-api-access-g96gg" (OuterVolumeSpecName: "kube-api-access-g96gg") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "kube-api-access-g96gg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.188991 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e3edb445-2398-4754-b292-a97fa8d32876" (UID: "e3edb445-2398-4754-b292-a97fa8d32876"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.189258 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.199396 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfccd\" (UniqueName: \"kubernetes.io/projected/016700b4-9818-4d96-be4d-d6b07316b91f-kube-api-access-wfccd\") pod \"manila-2c9f-account-create-update-zpp6n\" (UID: \"016700b4-9818-4d96-be4d-d6b07316b91f\") " pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.282840 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rspmb\" (UniqueName: \"kubernetes.io/projected/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-kube-api-access-rspmb\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283165 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283201 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283256 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-ceph\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283289 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-logs\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283342 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8rmc\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-kube-api-access-h8rmc\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283386 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283414 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283432 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283439 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283887 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.285110 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-logs\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.283450 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288115 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-horizon-secret-key\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288167 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-scripts\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288226 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-logs\") pod \"glance-default-internal-api-0\" (UID: 
\"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288242 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-config-data\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288391 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288404 5045 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288415 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288424 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288433 5045 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e3edb445-2398-4754-b292-a97fa8d32876-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288456 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288465 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g96gg\" (UniqueName: \"kubernetes.io/projected/e3edb445-2398-4754-b292-a97fa8d32876-kube-api-access-g96gg\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288473 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3edb445-2398-4754-b292-a97fa8d32876-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.288491 5045 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.290284 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.290842 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-config-data\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc 
kubenswrapper[5045]: I1125 23:51:20.293032 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-logs\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.293507 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.296055 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-horizon-secret-key\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.297795 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-scripts\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.298512 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.300273 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.306727 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8rmc\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-kube-api-access-h8rmc\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.340869 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.345824 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-ceph\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.351049 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rspmb\" (UniqueName: \"kubernetes.io/projected/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-kube-api-access-rspmb\") pod \"horizon-d8b56c587-2p4tv\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " 
pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.399930 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:51:20 crc kubenswrapper[5045]: E1125 23:51:20.400175 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.421063 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.428133 5045 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.466742 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.486275 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-2t5tj"] Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.511281 5045 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.762171 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.813081 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79677cc74f-49kdf"] Nov 25 23:51:20 crc kubenswrapper[5045]: W1125 23:51:20.820846 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ebf3d37_a217_499c_8c0c_cb3fc2cbd1eb.slice/crio-a639c8196454ba65e12208c4e8d7d545386ed4fb60c286903309484c366600cf WatchSource:0}: Error finding container a639c8196454ba65e12208c4e8d7d545386ed4fb60c286903309484c366600cf: Status 404 returned error can't find the container with id a639c8196454ba65e12208c4e8d7d545386ed4fb60c286903309484c366600cf Nov 25 23:51:20 crc kubenswrapper[5045]: I1125 23:51:20.944398 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-2c9f-account-create-update-zpp6n"] Nov 25 23:51:21 crc kubenswrapper[5045]: W1125 23:51:21.009455 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod016700b4_9818_4d96_be4d_d6b07316b91f.slice/crio-a4a6e3188fadb255bad8ac462b6a1367ff90e085dceefd22c49253d84cd13d3b WatchSource:0}: Error finding container a4a6e3188fadb255bad8ac462b6a1367ff90e085dceefd22c49253d84cd13d3b: Status 404 returned error can't find the container with id a4a6e3188fadb255bad8ac462b6a1367ff90e085dceefd22c49253d84cd13d3b Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.024881 5045 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"3dcd1a1a-085e-472a-8dab-788bba3c3ce4","Type":"ContainerStarted","Data":"cdc15ef532374972a552e7bde69c8d917cc34b4ac035d1126a4bd7b0fc10fe5f"} Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.030688 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2c9f-account-create-update-zpp6n" event={"ID":"016700b4-9818-4d96-be4d-d6b07316b91f","Type":"ContainerStarted","Data":"a4a6e3188fadb255bad8ac462b6a1367ff90e085dceefd22c49253d84cd13d3b"} Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.033118 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79677cc74f-49kdf" event={"ID":"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb","Type":"ContainerStarted","Data":"a639c8196454ba65e12208c4e8d7d545386ed4fb60c286903309484c366600cf"} Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.045895 5045 generic.go:334] "Generic (PLEG): container finished" podID="c091d493-4263-41da-a276-6dc859d7d5e1" containerID="ac795a1accd2529668995e4335212aa5d254db73b433a31d8ebc2c54bb3053ab" exitCode=0 Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.045967 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.046193 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-2t5tj" event={"ID":"c091d493-4263-41da-a276-6dc859d7d5e1","Type":"ContainerDied","Data":"ac795a1accd2529668995e4335212aa5d254db73b433a31d8ebc2c54bb3053ab"} Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.046237 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-2t5tj" event={"ID":"c091d493-4263-41da-a276-6dc859d7d5e1","Type":"ContainerStarted","Data":"7406aae6b615fe409b2c906b2046db3007a401bb8e1932c2b3e7916da1aa0685"} Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.046354 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.062320 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.071031 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-d8b56c587-2p4tv"] Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.126503 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-ceph\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.126554 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-combined-ca-bundle\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.126661 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.126728 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8rmc\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-kube-api-access-h8rmc\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.126787 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-internal-tls-certs\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.126903 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-scripts\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.126958 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-config-data\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.127002 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-logs\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.127056 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-httpd-run\") pod \"a451c310-a3d3-46fc-8015-0792f52d8840\" (UID: \"a451c310-a3d3-46fc-8015-0792f52d8840\") " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.128641 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-httpd-run" 
(OuterVolumeSpecName: "httpd-run") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.131983 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-logs" (OuterVolumeSpecName: "logs") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.146083 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.152823 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.158769 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-ceph" (OuterVolumeSpecName: "ceph") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.158797 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-scripts" (OuterVolumeSpecName: "scripts") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.158808 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.158851 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-config-data" (OuterVolumeSpecName: "config-data") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.158930 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.164749 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-kube-api-access-h8rmc" (OuterVolumeSpecName: "kube-api-access-h8rmc") pod "a451c310-a3d3-46fc-8015-0792f52d8840" (UID: "a451c310-a3d3-46fc-8015-0792f52d8840"). InnerVolumeSpecName "kube-api-access-h8rmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.164901 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.179985 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.182232 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.186853 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.190876 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.191964 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230100 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-scripts\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230179 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230224 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-logs\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230251 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230284 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-ceph\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 
crc kubenswrapper[5045]: I1125 23:51:21.230329 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230374 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230458 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-config-data\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230478 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55z67\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-kube-api-access-55z67\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230544 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230559 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230568 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230576 5045 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a451c310-a3d3-46fc-8015-0792f52d8840-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230584 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230592 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230610 5045 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230619 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8rmc\" 
(UniqueName: \"kubernetes.io/projected/a451c310-a3d3-46fc-8015-0792f52d8840-kube-api-access-h8rmc\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.230627 5045 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a451c310-a3d3-46fc-8015-0792f52d8840-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.277332 5045 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.332448 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-scripts\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.332746 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.332850 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-logs\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.332970 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-ceph\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.333054 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.333159 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.333277 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.333445 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-config-data\") pod 
\"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.333543 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55z67\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-kube-api-access-55z67\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.333681 5045 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.333980 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.334424 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.334657 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-logs\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.341863 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-config-data\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.342568 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.344321 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-ceph\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.344833 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.348469 5045 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-scripts\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.352122 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55z67\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-kube-api-access-55z67\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.367309 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:21 crc kubenswrapper[5045]: I1125 23:51:21.618173 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.067994 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"d86361ad-5146-475b-b0d4-c505b002904b","Type":"ContainerStarted","Data":"b93a075093295ea6939e1f7c18e8098e7dc5ba65bfedb534145e13fcf74de532"} Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.068524 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"d86361ad-5146-475b-b0d4-c505b002904b","Type":"ContainerStarted","Data":"3aa4bbaa2b3fab5ede5aa48abc5954fab2bd224fcb3498504de4cb02be5dd43f"} Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.071980 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2c9f-account-create-update-zpp6n" event={"ID":"016700b4-9818-4d96-be4d-d6b07316b91f","Type":"ContainerStarted","Data":"89a043454ac16a34ec97c43db45bb9a9c3ab1b92053b1c560fbb4cd1f9855a7d"} Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.079450 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d8b56c587-2p4tv" event={"ID":"4c4bf132-dd77-4677-a2d4-1b9ad6266d81","Type":"ContainerStarted","Data":"2c9ad606cd5e08c53bb5ec52d641e55e4fbc3c3b0839c80f167bd7525f82b93c"} Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.079606 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.099066 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=3.047978123 podStartE2EDuration="4.099044785s" podCreationTimestamp="2025-11-25 23:51:18 +0000 UTC" firstStartedPulling="2025-11-25 23:51:20.024358137 +0000 UTC m=+3136.382017249" lastFinishedPulling="2025-11-25 23:51:21.075424799 +0000 UTC m=+3137.433083911" observedRunningTime="2025-11-25 23:51:22.090381294 +0000 UTC m=+3138.448040406" watchObservedRunningTime="2025-11-25 23:51:22.099044785 +0000 UTC m=+3138.456703897" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.105293 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-2c9f-account-create-update-zpp6n" podStartSLOduration=3.105277419 podStartE2EDuration="3.105277419s" podCreationTimestamp="2025-11-25 23:51:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:51:22.10386257 +0000 UTC m=+3138.461521682" watchObservedRunningTime="2025-11-25 23:51:22.105277419 +0000 UTC m=+3138.462936531" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.160610 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.176344 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.188836 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.190423 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.193515 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.193647 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.206937 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.273202 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.291963 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.292041 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-logs\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.292079 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.292109 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.292136 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfwfz\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-kube-api-access-wfwfz\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.292264 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.292404 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " 
pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.292451 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.292520 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394062 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394106 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfwfz\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-kube-api-access-wfwfz\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394142 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394188 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394203 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394234 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394265 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 
23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394317 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-logs\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.394401 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.397371 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.399138 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.399154 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-logs\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.405146 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.405409 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.406067 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.411956 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.419844 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a451c310-a3d3-46fc-8015-0792f52d8840" 
path="/var/lib/kubelet/pods/a451c310-a3d3-46fc-8015-0792f52d8840/volumes" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.421008 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3edb445-2398-4754-b292-a97fa8d32876" path="/var/lib/kubelet/pods/e3edb445-2398-4754-b292-a97fa8d32876/volumes" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.422392 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.423673 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfwfz\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-kube-api-access-wfwfz\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.425190 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.500066 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-79677cc74f-49kdf"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.530605 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.538478 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.547834 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-747467cb48-48fv7"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.549457 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.558149 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.569476 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-747467cb48-48fv7"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.600674 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-scripts\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.600759 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-tls-certs\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.600842 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-secret-key\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.601027 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-combined-ca-bundle\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.601085 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/776ddfbe-2357-4e0e-a89e-2d82c43f4212-logs\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.601123 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6654s\" (UniqueName: \"kubernetes.io/projected/776ddfbe-2357-4e0e-a89e-2d82c43f4212-kube-api-access-6654s\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.601175 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-config-data\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.645619 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.659698 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.696763 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-d8b56c587-2p4tv"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.702415 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgjvx\" (UniqueName: \"kubernetes.io/projected/c091d493-4263-41da-a276-6dc859d7d5e1-kube-api-access-sgjvx\") pod \"c091d493-4263-41da-a276-6dc859d7d5e1\" (UID: \"c091d493-4263-41da-a276-6dc859d7d5e1\") " Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.703114 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c091d493-4263-41da-a276-6dc859d7d5e1-operator-scripts\") pod \"c091d493-4263-41da-a276-6dc859d7d5e1\" (UID: \"c091d493-4263-41da-a276-6dc859d7d5e1\") " Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.703618 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c091d493-4263-41da-a276-6dc859d7d5e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c091d493-4263-41da-a276-6dc859d7d5e1" (UID: "c091d493-4263-41da-a276-6dc859d7d5e1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.704585 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-combined-ca-bundle\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.705407 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/776ddfbe-2357-4e0e-a89e-2d82c43f4212-logs\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.705558 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6654s\" (UniqueName: \"kubernetes.io/projected/776ddfbe-2357-4e0e-a89e-2d82c43f4212-kube-api-access-6654s\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.706436 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-config-data\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.706872 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-scripts\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.708072 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-tls-certs\") pod 
\"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.709091 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-secret-key\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.707439 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-scripts\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.708548 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-config-data\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.706876 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c091d493-4263-41da-a276-6dc859d7d5e1-kube-api-access-sgjvx" (OuterVolumeSpecName: "kube-api-access-sgjvx") pod "c091d493-4263-41da-a276-6dc859d7d5e1" (UID: "c091d493-4263-41da-a276-6dc859d7d5e1"). InnerVolumeSpecName "kube-api-access-sgjvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.705845 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/776ddfbe-2357-4e0e-a89e-2d82c43f4212-logs\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.709866 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-combined-ca-bundle\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.711244 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c091d493-4263-41da-a276-6dc859d7d5e1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.712411 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-secret-key\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.712490 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-88c75b4b-7vjc8"] Nov 25 23:51:22 crc kubenswrapper[5045]: E1125 23:51:22.712899 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c091d493-4263-41da-a276-6dc859d7d5e1" containerName="mariadb-database-create" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.712915 5045 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="c091d493-4263-41da-a276-6dc859d7d5e1" containerName="mariadb-database-create" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.713088 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c091d493-4263-41da-a276-6dc859d7d5e1" containerName="mariadb-database-create" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.714056 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.730904 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6654s\" (UniqueName: \"kubernetes.io/projected/776ddfbe-2357-4e0e-a89e-2d82c43f4212-kube-api-access-6654s\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.752646 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-tls-certs\") pod \"horizon-747467cb48-48fv7\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.766612 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-88c75b4b-7vjc8"] Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.813344 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lt87g\" (UniqueName: \"kubernetes.io/projected/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-kube-api-access-lt87g\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.813391 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-horizon-secret-key\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.813482 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-combined-ca-bundle\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.813967 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-logs\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.814155 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-config-data\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.814305 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-horizon-tls-certs\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.814628 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-scripts\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.814783 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgjvx\" (UniqueName: \"kubernetes.io/projected/c091d493-4263-41da-a276-6dc859d7d5e1-kube-api-access-sgjvx\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.918779 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-scripts\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.919056 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lt87g\" (UniqueName: \"kubernetes.io/projected/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-kube-api-access-lt87g\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.919088 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-horizon-secret-key\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.919197 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-combined-ca-bundle\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.919248 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-logs\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.919266 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-config-data\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.919298 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-horizon-tls-certs\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc 
kubenswrapper[5045]: I1125 23:51:22.920496 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-logs\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.921452 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-scripts\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.922819 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-config-data\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.924621 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-horizon-secret-key\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.926890 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-horizon-tls-certs\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.929765 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-combined-ca-bundle\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:22 crc kubenswrapper[5045]: I1125 23:51:22.939602 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lt87g\" (UniqueName: \"kubernetes.io/projected/a78ee35a-ac96-40b7-b9aa-92bdaadf339b-kube-api-access-lt87g\") pod \"horizon-88c75b4b-7vjc8\" (UID: \"a78ee35a-ac96-40b7-b9aa-92bdaadf339b\") " pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.005903 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.066107 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.093801 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82","Type":"ContainerStarted","Data":"90775b3e1c61b15b72b0de9f6f26c59545b7a5c242a573cdb07e02589b461236"} Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.100421 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"3dcd1a1a-085e-472a-8dab-788bba3c3ce4","Type":"ContainerStarted","Data":"11af4642776f549f48d3d11b6fd017be383761f43462eb1ee6fd84b99feeebaf"} Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.103817 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2c9f-account-create-update-zpp6n" event={"ID":"016700b4-9818-4d96-be4d-d6b07316b91f","Type":"ContainerDied","Data":"89a043454ac16a34ec97c43db45bb9a9c3ab1b92053b1c560fbb4cd1f9855a7d"} Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.103808 5045 generic.go:334] "Generic (PLEG): container finished" podID="016700b4-9818-4d96-be4d-d6b07316b91f" containerID="89a043454ac16a34ec97c43db45bb9a9c3ab1b92053b1c560fbb4cd1f9855a7d" exitCode=0 Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.107767 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-2t5tj" event={"ID":"c091d493-4263-41da-a276-6dc859d7d5e1","Type":"ContainerDied","Data":"7406aae6b615fe409b2c906b2046db3007a401bb8e1932c2b3e7916da1aa0685"} Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.107799 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7406aae6b615fe409b2c906b2046db3007a401bb8e1932c2b3e7916da1aa0685" Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.107812 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-2t5tj" Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.244826 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.505131 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-747467cb48-48fv7"] Nov 25 23:51:23 crc kubenswrapper[5045]: I1125 23:51:23.637931 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-88c75b4b-7vjc8"] Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.125080 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"3dcd1a1a-085e-472a-8dab-788bba3c3ce4","Type":"ContainerStarted","Data":"9d4baac35da3e8d374f965630a3e47c6263bf30dc82af753e2c80cbbe7c60e37"} Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.128280 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-747467cb48-48fv7" event={"ID":"776ddfbe-2357-4e0e-a89e-2d82c43f4212","Type":"ContainerStarted","Data":"e954849daa2b4bdc36edea7f5ce6b318a3ea7393e639b3a48078e014ff34d3e7"} Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.129852 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e0d9560-504a-4e4d-8deb-cc36b9afe12f","Type":"ContainerStarted","Data":"97b0e837c9f2cb48d198c9a3e776786f6869525f84953ba107cad6f3c3facfa6"} Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.129879 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e0d9560-504a-4e4d-8deb-cc36b9afe12f","Type":"ContainerStarted","Data":"31ba8f9e09d5d06f20516b6cd83f58bb67aeecb51b9b51ad9d82ad073d3ccd4d"} Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.131433 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-88c75b4b-7vjc8" event={"ID":"a78ee35a-ac96-40b7-b9aa-92bdaadf339b","Type":"ContainerStarted","Data":"25decaf0d3cf873352152d6fbe3d0d2a13bb975460b49a2340209bc6d5cee321"} Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.133330 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82","Type":"ContainerStarted","Data":"58c9a76f93075057d7b0fcc997b65828fb790ced8991286a72e8be84f98eb821"} Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.147619 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=4.278122253 podStartE2EDuration="6.147604755s" podCreationTimestamp="2025-11-25 23:51:18 +0000 UTC" firstStartedPulling="2025-11-25 23:51:20.7728803 +0000 UTC m=+3137.130539412" lastFinishedPulling="2025-11-25 23:51:22.642362801 +0000 UTC m=+3139.000021914" observedRunningTime="2025-11-25 23:51:24.142172253 +0000 UTC m=+3140.499831385" watchObservedRunningTime="2025-11-25 23:51:24.147604755 +0000 UTC m=+3140.505263867" Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.272209 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.348898 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.711525 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.765172 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfccd\" (UniqueName: \"kubernetes.io/projected/016700b4-9818-4d96-be4d-d6b07316b91f-kube-api-access-wfccd\") pod \"016700b4-9818-4d96-be4d-d6b07316b91f\" (UID: \"016700b4-9818-4d96-be4d-d6b07316b91f\") " Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.765377 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/016700b4-9818-4d96-be4d-d6b07316b91f-operator-scripts\") pod \"016700b4-9818-4d96-be4d-d6b07316b91f\" (UID: \"016700b4-9818-4d96-be4d-d6b07316b91f\") " Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.766280 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/016700b4-9818-4d96-be4d-d6b07316b91f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "016700b4-9818-4d96-be4d-d6b07316b91f" (UID: "016700b4-9818-4d96-be4d-d6b07316b91f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.782414 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/016700b4-9818-4d96-be4d-d6b07316b91f-kube-api-access-wfccd" (OuterVolumeSpecName: "kube-api-access-wfccd") pod "016700b4-9818-4d96-be4d-d6b07316b91f" (UID: "016700b4-9818-4d96-be4d-d6b07316b91f"). InnerVolumeSpecName "kube-api-access-wfccd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.871835 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfccd\" (UniqueName: \"kubernetes.io/projected/016700b4-9818-4d96-be4d-d6b07316b91f-kube-api-access-wfccd\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:24 crc kubenswrapper[5045]: I1125 23:51:24.871864 5045 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/016700b4-9818-4d96-be4d-d6b07316b91f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:25 crc kubenswrapper[5045]: I1125 23:51:25.181086 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82","Type":"ContainerStarted","Data":"82f0258cc263d0416f6df4dc98e9c7e06e13b35b0636b78e54a297cb70793453"} Nov 25 23:51:25 crc kubenswrapper[5045]: I1125 23:51:25.185591 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-2c9f-account-create-update-zpp6n" Nov 25 23:51:25 crc kubenswrapper[5045]: I1125 23:51:25.185752 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2c9f-account-create-update-zpp6n" event={"ID":"016700b4-9818-4d96-be4d-d6b07316b91f","Type":"ContainerDied","Data":"a4a6e3188fadb255bad8ac462b6a1367ff90e085dceefd22c49253d84cd13d3b"} Nov 25 23:51:25 crc kubenswrapper[5045]: I1125 23:51:25.185784 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4a6e3188fadb255bad8ac462b6a1367ff90e085dceefd22c49253d84cd13d3b" Nov 25 23:51:26 crc kubenswrapper[5045]: I1125 23:51:26.198091 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerName="glance-log" containerID="cri-o://58c9a76f93075057d7b0fcc997b65828fb790ced8991286a72e8be84f98eb821" gracePeriod=30 Nov 25 23:51:26 crc kubenswrapper[5045]: I1125 23:51:26.198537 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerName="glance-log" containerID="cri-o://97b0e837c9f2cb48d198c9a3e776786f6869525f84953ba107cad6f3c3facfa6" gracePeriod=30 Nov 25 23:51:26 crc kubenswrapper[5045]: I1125 23:51:26.198594 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e0d9560-504a-4e4d-8deb-cc36b9afe12f","Type":"ContainerStarted","Data":"15a7345f3a33328106ed25d0f2a12e5869767ce1cb822ae5e5e72477eb80f231"} Nov 25 23:51:26 crc kubenswrapper[5045]: I1125 23:51:26.199974 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerName="glance-httpd" containerID="cri-o://82f0258cc263d0416f6df4dc98e9c7e06e13b35b0636b78e54a297cb70793453" gracePeriod=30 Nov 25 23:51:26 crc kubenswrapper[5045]: I1125 23:51:26.200072 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerName="glance-httpd" containerID="cri-o://15a7345f3a33328106ed25d0f2a12e5869767ce1cb822ae5e5e72477eb80f231" gracePeriod=30 Nov 25 23:51:26 crc kubenswrapper[5045]: I1125 23:51:26.234610 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.234591225 podStartE2EDuration="5.234591225s" podCreationTimestamp="2025-11-25 23:51:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:51:26.22363615 +0000 UTC m=+3142.581295262" watchObservedRunningTime="2025-11-25 23:51:26.234591225 +0000 UTC m=+3142.592250327" Nov 25 23:51:26 crc kubenswrapper[5045]: I1125 23:51:26.253891 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.253869363 podStartE2EDuration="4.253869363s" podCreationTimestamp="2025-11-25 23:51:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:51:26.244529812 +0000 UTC m=+3142.602188934" watchObservedRunningTime="2025-11-25 23:51:26.253869363 +0000 UTC m=+3142.611528475" Nov 25 23:51:27 crc 
kubenswrapper[5045]: I1125 23:51:27.210277 5045 generic.go:334] "Generic (PLEG): container finished" podID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerID="82f0258cc263d0416f6df4dc98e9c7e06e13b35b0636b78e54a297cb70793453" exitCode=0 Nov 25 23:51:27 crc kubenswrapper[5045]: I1125 23:51:27.210625 5045 generic.go:334] "Generic (PLEG): container finished" podID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerID="58c9a76f93075057d7b0fcc997b65828fb790ced8991286a72e8be84f98eb821" exitCode=143 Nov 25 23:51:27 crc kubenswrapper[5045]: I1125 23:51:27.210345 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82","Type":"ContainerDied","Data":"82f0258cc263d0416f6df4dc98e9c7e06e13b35b0636b78e54a297cb70793453"} Nov 25 23:51:27 crc kubenswrapper[5045]: I1125 23:51:27.210692 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82","Type":"ContainerDied","Data":"58c9a76f93075057d7b0fcc997b65828fb790ced8991286a72e8be84f98eb821"} Nov 25 23:51:27 crc kubenswrapper[5045]: I1125 23:51:27.213420 5045 generic.go:334] "Generic (PLEG): container finished" podID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerID="15a7345f3a33328106ed25d0f2a12e5869767ce1cb822ae5e5e72477eb80f231" exitCode=0 Nov 25 23:51:27 crc kubenswrapper[5045]: I1125 23:51:27.213461 5045 generic.go:334] "Generic (PLEG): container finished" podID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerID="97b0e837c9f2cb48d198c9a3e776786f6869525f84953ba107cad6f3c3facfa6" exitCode=143 Nov 25 23:51:27 crc kubenswrapper[5045]: I1125 23:51:27.213469 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e0d9560-504a-4e4d-8deb-cc36b9afe12f","Type":"ContainerDied","Data":"15a7345f3a33328106ed25d0f2a12e5869767ce1cb822ae5e5e72477eb80f231"} Nov 25 23:51:27 crc kubenswrapper[5045]: I1125 23:51:27.213509 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e0d9560-504a-4e4d-8deb-cc36b9afe12f","Type":"ContainerDied","Data":"97b0e837c9f2cb48d198c9a3e776786f6869525f84953ba107cad6f3c3facfa6"} Nov 25 23:51:29 crc kubenswrapper[5045]: I1125 23:51:29.466758 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Nov 25 23:51:29 crc kubenswrapper[5045]: I1125 23:51:29.575112 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.547150 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-fjsxq"] Nov 25 23:51:30 crc kubenswrapper[5045]: E1125 23:51:30.547874 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="016700b4-9818-4d96-be4d-d6b07316b91f" containerName="mariadb-account-create-update" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.547919 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="016700b4-9818-4d96-be4d-d6b07316b91f" containerName="mariadb-account-create-update" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.548385 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="016700b4-9818-4d96-be4d-d6b07316b91f" containerName="mariadb-account-create-update" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.549672 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.552414 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-jrkhj" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.552774 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.559912 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-fjsxq"] Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.605006 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n79rb\" (UniqueName: \"kubernetes.io/projected/8b764d3f-bf9c-4407-b5d5-6f2834714d50-kube-api-access-n79rb\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.605079 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-combined-ca-bundle\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.605232 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-job-config-data\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.605265 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-config-data\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.707152 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-combined-ca-bundle\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.707422 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-job-config-data\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.707466 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-config-data\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.707589 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n79rb\" (UniqueName: \"kubernetes.io/projected/8b764d3f-bf9c-4407-b5d5-6f2834714d50-kube-api-access-n79rb\") pod \"manila-db-sync-fjsxq\" (UID: 
\"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.714485 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-combined-ca-bundle\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.715775 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-config-data\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.716493 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-job-config-data\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.728939 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n79rb\" (UniqueName: \"kubernetes.io/projected/8b764d3f-bf9c-4407-b5d5-6f2834714d50-kube-api-access-n79rb\") pod \"manila-db-sync-fjsxq\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:30 crc kubenswrapper[5045]: I1125 23:51:30.885432 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-fjsxq" Nov 25 23:51:31 crc kubenswrapper[5045]: I1125 23:51:31.397023 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:51:31 crc kubenswrapper[5045]: E1125 23:51:31.397679 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:51:36 crc kubenswrapper[5045]: W1125 23:51:36.744291 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b764d3f_bf9c_4407_b5d5_6f2834714d50.slice/crio-0217bc4620f842cef5d2676cdb54780507b82bf8fecfcb2d9223843e6cc7a094 WatchSource:0}: Error finding container 0217bc4620f842cef5d2676cdb54780507b82bf8fecfcb2d9223843e6cc7a094: Status 404 returned error can't find the container with id 0217bc4620f842cef5d2676cdb54780507b82bf8fecfcb2d9223843e6cc7a094 Nov 25 23:51:36 crc kubenswrapper[5045]: I1125 23:51:36.747914 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-fjsxq"] Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.333183 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-fjsxq" event={"ID":"8b764d3f-bf9c-4407-b5d5-6f2834714d50","Type":"ContainerStarted","Data":"0217bc4620f842cef5d2676cdb54780507b82bf8fecfcb2d9223843e6cc7a094"} Nov 25 23:51:37 crc kubenswrapper[5045]: E1125 23:51:37.674400 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled 
desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 25 23:51:37 crc kubenswrapper[5045]: E1125 23:51:37.674998 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n58ch58h6dh586hbfh8fhb9hbh76h56h646h54ch66h559h6bh58ch589hdbh654h548h6h8dh86h56h8bh5b7h7bhbbh545hf9hd4h694q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cml67,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-79677cc74f-49kdf_openstack(7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:51:37 crc kubenswrapper[5045]: E1125 23:51:37.681092 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-79677cc74f-49kdf" podUID="7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.699032 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783113 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-httpd-run\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783162 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-combined-ca-bundle\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783251 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-logs\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783357 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-scripts\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783455 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-ceph\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783491 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-config-data\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783507 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-internal-tls-certs\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783551 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783569 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfwfz\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-kube-api-access-wfwfz\") pod \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\" (UID: \"5e0d9560-504a-4e4d-8deb-cc36b9afe12f\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.783540 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.785026 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-logs" (OuterVolumeSpecName: "logs") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.788630 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.789063 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-kube-api-access-wfwfz" (OuterVolumeSpecName: "kube-api-access-wfwfz") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "kube-api-access-wfwfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.789342 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-ceph" (OuterVolumeSpecName: "ceph") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.790629 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-scripts" (OuterVolumeSpecName: "scripts") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.812021 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.833252 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-config-data" (OuterVolumeSpecName: "config-data") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.834851 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5e0d9560-504a-4e4d-8deb-cc36b9afe12f" (UID: "5e0d9560-504a-4e4d-8deb-cc36b9afe12f"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.848765 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.885008 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-combined-ca-bundle\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.885119 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-ceph\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.885145 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55z67\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-kube-api-access-55z67\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.886089 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.885214 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-httpd-run\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.886471 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.886533 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-logs\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.886576 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-config-data\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.886595 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-scripts\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.886611 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-public-tls-certs\") pod \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\" (UID: \"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82\") " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887391 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887417 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887430 5045 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887453 5045 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887466 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfwfz\" (UniqueName: \"kubernetes.io/projected/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-kube-api-access-wfwfz\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887479 5045 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887490 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887501 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887512 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e0d9560-504a-4e4d-8deb-cc36b9afe12f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.887522 5045 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.888148 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-logs" (OuterVolumeSpecName: "logs") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.889862 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-ceph" (OuterVolumeSpecName: "ceph") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). 
InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.890407 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-kube-api-access-55z67" (OuterVolumeSpecName: "kube-api-access-55z67") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). InnerVolumeSpecName "kube-api-access-55z67". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.891243 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.891657 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-scripts" (OuterVolumeSpecName: "scripts") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.910273 5045 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.918858 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.933879 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-config-data" (OuterVolumeSpecName: "config-data") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.947879 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" (UID: "ec7eddef-d6a8-4a39-9aaf-6bead1bebd82"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989110 5045 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989140 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989150 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989159 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55z67\" (UniqueName: \"kubernetes.io/projected/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-kube-api-access-55z67\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989189 5045 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989199 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989208 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989216 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:37 crc kubenswrapper[5045]: I1125 23:51:37.989224 5045 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.007861 5045 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.128816 5045 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.344377 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e0d9560-504a-4e4d-8deb-cc36b9afe12f","Type":"ContainerDied","Data":"31ba8f9e09d5d06f20516b6cd83f58bb67aeecb51b9b51ad9d82ad073d3ccd4d"} Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.344465 5045 scope.go:117] "RemoveContainer" containerID="15a7345f3a33328106ed25d0f2a12e5869767ce1cb822ae5e5e72477eb80f231" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.344809 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.348179 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.353032 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ec7eddef-d6a8-4a39-9aaf-6bead1bebd82","Type":"ContainerDied","Data":"90775b3e1c61b15b72b0de9f6f26c59545b7a5c242a573cdb07e02589b461236"} Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.454575 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.481647 5045 scope.go:117] "RemoveContainer" containerID="97b0e837c9f2cb48d198c9a3e776786f6869525f84953ba107cad6f3c3facfa6" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.498278 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.513346 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.523056 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.530774 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:38 crc kubenswrapper[5045]: E1125 23:51:38.531904 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerName="glance-httpd" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.531941 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerName="glance-httpd" Nov 25 23:51:38 crc kubenswrapper[5045]: E1125 23:51:38.531968 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerName="glance-log" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.531976 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerName="glance-log" Nov 25 23:51:38 crc kubenswrapper[5045]: E1125 23:51:38.533781 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerName="glance-log" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.533825 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerName="glance-log" Nov 25 23:51:38 crc kubenswrapper[5045]: E1125 23:51:38.533854 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerName="glance-httpd" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.533865 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerName="glance-httpd" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.538963 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerName="glance-log" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.539021 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerName="glance-log" Nov 25 23:51:38 crc 
kubenswrapper[5045]: I1125 23:51:38.539092 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" containerName="glance-httpd" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.539106 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" containerName="glance-httpd" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.541344 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.541507 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.543258 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.543487 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.543947 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.544832 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-pjcxf" Nov 25 23:51:38 crc kubenswrapper[5045]: E1125 23:51:38.548616 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 25 23:51:38 crc kubenswrapper[5045]: E1125 23:51:38.549053 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nc6h6h76h587h648h66dh564h677h684h7h594hcdh77h54dhcch67dh654h5d6h88h6chd5h65bh55ch67dhf5h688h5b4h555h5b4h66fh67fh5bbq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rspmb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-d8b56c587-2p4tv_openstack(4c4bf132-dd77-4677-a2d4-1b9ad6266d81): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.555150 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.558547 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.560257 5045 scope.go:117] "RemoveContainer" containerID="82f0258cc263d0416f6df4dc98e9c7e06e13b35b0636b78e54a297cb70793453" Nov 25 23:51:38 crc kubenswrapper[5045]: E1125 23:51:38.560361 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-d8b56c587-2p4tv" podUID="4c4bf132-dd77-4677-a2d4-1b9ad6266d81" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.562611 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.567079 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.571106 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 23:51:38 crc kubenswrapper[5045]: E1125 23:51:38.609138 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec7eddef_d6a8_4a39_9aaf_6bead1bebd82.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e0d9560_504a_4e4d_8deb_cc36b9afe12f.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec7eddef_d6a8_4a39_9aaf_6bead1bebd82.slice/crio-90775b3e1c61b15b72b0de9f6f26c59545b7a5c242a573cdb07e02589b461236\": RecentStats: unable to find data in memory cache]" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.638517 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.638564 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.638592 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.638660 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-scripts\") pod \"glance-default-external-api-0\" 
(UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.638686 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf6cc533-9827-4132-9d84-50fe49efef41-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.638706 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf6cc533-9827-4132-9d84-50fe49efef41-logs\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.638855 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5j46\" (UniqueName: \"kubernetes.io/projected/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-kube-api-access-w5j46\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639027 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-logs\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639095 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-config-data\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639140 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffjl2\" (UniqueName: \"kubernetes.io/projected/bf6cc533-9827-4132-9d84-50fe49efef41-kube-api-access-ffjl2\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639220 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639439 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639511 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639549 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bf6cc533-9827-4132-9d84-50fe49efef41-ceph\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639627 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639705 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639763 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-ceph\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.639784 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.684943 5045 scope.go:117] "RemoveContainer" containerID="58c9a76f93075057d7b0fcc997b65828fb790ced8991286a72e8be84f98eb821" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.740987 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741029 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741068 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-scripts\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741085 
5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf6cc533-9827-4132-9d84-50fe49efef41-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741100 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf6cc533-9827-4132-9d84-50fe49efef41-logs\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741118 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5j46\" (UniqueName: \"kubernetes.io/projected/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-kube-api-access-w5j46\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741148 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-logs\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741168 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-config-data\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741189 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffjl2\" (UniqueName: \"kubernetes.io/projected/bf6cc533-9827-4132-9d84-50fe49efef41-kube-api-access-ffjl2\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741214 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741264 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741284 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741298 5045 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bf6cc533-9827-4132-9d84-50fe49efef41-ceph\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741321 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741360 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741386 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-ceph\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741403 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.741460 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.742058 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.742472 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.742979 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.743069 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf6cc533-9827-4132-9d84-50fe49efef41-logs\") pod 
\"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.743206 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-logs\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.747485 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf6cc533-9827-4132-9d84-50fe49efef41-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.747546 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bf6cc533-9827-4132-9d84-50fe49efef41-ceph\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.748025 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.749311 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.752364 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-ceph\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.755493 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.755560 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-scripts\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.756362 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.756525 
5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-config-data\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.758120 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.759327 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf6cc533-9827-4132-9d84-50fe49efef41-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.763428 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffjl2\" (UniqueName: \"kubernetes.io/projected/bf6cc533-9827-4132-9d84-50fe49efef41-kube-api-access-ffjl2\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.778841 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5j46\" (UniqueName: \"kubernetes.io/projected/c46cc706-d7b4-4d2a-b75e-a8bed8a125eb-kube-api-access-w5j46\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.809358 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb\") " pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.820269 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"bf6cc533-9827-4132-9d84-50fe49efef41\") " pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.881626 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.893674 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:38 crc kubenswrapper[5045]: I1125 23:51:38.969097 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.046589 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-scripts\") pod \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.046681 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-horizon-secret-key\") pod \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.046817 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cml67\" (UniqueName: \"kubernetes.io/projected/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-kube-api-access-cml67\") pod \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.046876 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-logs\") pod \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.046896 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-config-data\") pod \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\" (UID: \"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.047816 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-scripts" (OuterVolumeSpecName: "scripts") pod "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb" (UID: "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.048166 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-logs" (OuterVolumeSpecName: "logs") pod "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb" (UID: "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.048219 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-config-data" (OuterVolumeSpecName: "config-data") pod "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb" (UID: "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.054098 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-kube-api-access-cml67" (OuterVolumeSpecName: "kube-api-access-cml67") pod "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb" (UID: "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb"). InnerVolumeSpecName "kube-api-access-cml67". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.059051 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb" (UID: "7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.149476 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.149506 5045 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.149518 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cml67\" (UniqueName: \"kubernetes.io/projected/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-kube-api-access-cml67\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.149528 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.149535 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.374048 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-79677cc74f-49kdf" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.374448 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79677cc74f-49kdf" event={"ID":"7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb","Type":"ContainerDied","Data":"a639c8196454ba65e12208c4e8d7d545386ed4fb60c286903309484c366600cf"} Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.459236 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.492122 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-79677cc74f-49kdf"] Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.506365 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-79677cc74f-49kdf"] Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.517985 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 23:51:39 crc kubenswrapper[5045]: E1125 23:51:39.593868 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 25 23:51:39 crc kubenswrapper[5045]: E1125 23:51:39.594105 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n555h578hc5h569hf4h5c8hcchd7h56fh57ch64fh6bh6ch557h5cfhc5h54fh589h8bh545h56dh58ch95h89h64h68h699h7bh99hcch67dh666q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lt87g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-88c75b4b-7vjc8_openstack(a78ee35a-ac96-40b7-b9aa-92bdaadf339b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:51:39 crc kubenswrapper[5045]: E1125 
23:51:39.596070 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-88c75b4b-7vjc8" podUID="a78ee35a-ac96-40b7-b9aa-92bdaadf339b" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.795286 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.862713 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rspmb\" (UniqueName: \"kubernetes.io/projected/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-kube-api-access-rspmb\") pod \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.862829 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-logs\") pod \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.862856 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-scripts\") pod \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.862911 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-horizon-secret-key\") pod \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.863000 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-config-data\") pod \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\" (UID: \"4c4bf132-dd77-4677-a2d4-1b9ad6266d81\") " Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.863386 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-logs" (OuterVolumeSpecName: "logs") pod "4c4bf132-dd77-4677-a2d4-1b9ad6266d81" (UID: "4c4bf132-dd77-4677-a2d4-1b9ad6266d81"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.863501 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.863898 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-config-data" (OuterVolumeSpecName: "config-data") pod "4c4bf132-dd77-4677-a2d4-1b9ad6266d81" (UID: "4c4bf132-dd77-4677-a2d4-1b9ad6266d81"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.864177 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-scripts" (OuterVolumeSpecName: "scripts") pod "4c4bf132-dd77-4677-a2d4-1b9ad6266d81" (UID: "4c4bf132-dd77-4677-a2d4-1b9ad6266d81"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.867094 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "4c4bf132-dd77-4677-a2d4-1b9ad6266d81" (UID: "4c4bf132-dd77-4677-a2d4-1b9ad6266d81"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.867808 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-kube-api-access-rspmb" (OuterVolumeSpecName: "kube-api-access-rspmb") pod "4c4bf132-dd77-4677-a2d4-1b9ad6266d81" (UID: "4c4bf132-dd77-4677-a2d4-1b9ad6266d81"). InnerVolumeSpecName "kube-api-access-rspmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.964850 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rspmb\" (UniqueName: \"kubernetes.io/projected/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-kube-api-access-rspmb\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.964873 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.964882 5045 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:39 crc kubenswrapper[5045]: I1125 23:51:39.964890 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c4bf132-dd77-4677-a2d4-1b9ad6266d81-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.389797 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d8b56c587-2p4tv" event={"ID":"4c4bf132-dd77-4677-a2d4-1b9ad6266d81","Type":"ContainerDied","Data":"2c9ad606cd5e08c53bb5ec52d641e55e4fbc3c3b0839c80f167bd7525f82b93c"} Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.389843 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-d8b56c587-2p4tv" Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.391954 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb","Type":"ContainerStarted","Data":"a995a3f066bbd0c5554ad6e8909df16d0f1b0173d03e7ce51f7adda515fac01b"} Nov 25 23:51:40 crc kubenswrapper[5045]: E1125 23:51:40.400668 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-88c75b4b-7vjc8" podUID="a78ee35a-ac96-40b7-b9aa-92bdaadf339b" Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.421355 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e0d9560-504a-4e4d-8deb-cc36b9afe12f" path="/var/lib/kubelet/pods/5e0d9560-504a-4e4d-8deb-cc36b9afe12f/volumes" Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.423238 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb" path="/var/lib/kubelet/pods/7ebf3d37-a217-499c-8c0c-cb3fc2cbd1eb/volumes" Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.427890 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec7eddef-d6a8-4a39-9aaf-6bead1bebd82" path="/var/lib/kubelet/pods/ec7eddef-d6a8-4a39-9aaf-6bead1bebd82/volumes" Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.431669 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bf6cc533-9827-4132-9d84-50fe49efef41","Type":"ContainerStarted","Data":"491a23c721742e7578f7b2389a73b8a59ad3ac3dbe24ccb4b6091c3e10111a89"} Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.498846 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-d8b56c587-2p4tv"] Nov 25 23:51:40 crc kubenswrapper[5045]: I1125 23:51:40.507885 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-d8b56c587-2p4tv"] Nov 25 23:51:40 crc kubenswrapper[5045]: E1125 23:51:40.956370 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 25 23:51:40 crc kubenswrapper[5045]: E1125 23:51:40.956656 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n65h55fh57h66bh587h655h584h7dh58dh655h5dchfch9chfh65hfbh694h76hfbh5dfh97h7hd9h5c5h9h548h66bh68ch597h58ch8ch5b8q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6654s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-747467cb48-48fv7_openstack(776ddfbe-2357-4e0e-a89e-2d82c43f4212): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:51:40 crc kubenswrapper[5045]: E1125 23:51:40.958667 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-747467cb48-48fv7" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" Nov 25 23:51:41 crc kubenswrapper[5045]: I1125 23:51:41.415494 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb","Type":"ContainerStarted","Data":"c7b95a7c878f5aece9ac559c13e7e751d147f4a4bbcfc7266aa3cf1965b67c94"} Nov 25 23:51:41 crc kubenswrapper[5045]: I1125 23:51:41.418342 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bf6cc533-9827-4132-9d84-50fe49efef41","Type":"ContainerStarted","Data":"2d431136427327c888e36f00e2f210761bda266cedad9b3e884f0649d931e36a"} Nov 25 23:51:41 crc kubenswrapper[5045]: E1125 23:51:41.419789 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-747467cb48-48fv7" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" Nov 25 23:51:42 crc kubenswrapper[5045]: I1125 23:51:42.398001 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:51:42 crc kubenswrapper[5045]: E1125 23:51:42.399401 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:51:42 crc kubenswrapper[5045]: I1125 23:51:42.408681 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c4bf132-dd77-4677-a2d4-1b9ad6266d81" path="/var/lib/kubelet/pods/4c4bf132-dd77-4677-a2d4-1b9ad6266d81/volumes" Nov 25 23:51:42 crc kubenswrapper[5045]: I1125 23:51:42.432459 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c46cc706-d7b4-4d2a-b75e-a8bed8a125eb","Type":"ContainerStarted","Data":"3b964d9039e28a1c047b1eed5bafd6966084084818d5abb34f7488d0b1e4003b"} Nov 25 23:51:42 crc kubenswrapper[5045]: I1125 23:51:42.437928 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bf6cc533-9827-4132-9d84-50fe49efef41","Type":"ContainerStarted","Data":"49616b2c2cca8f41b643b5e3de51b94da4ca33c7261b95a6c25548fe5710c601"} Nov 25 23:51:42 crc kubenswrapper[5045]: I1125 23:51:42.463880 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.463863333 podStartE2EDuration="4.463863333s" podCreationTimestamp="2025-11-25 23:51:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:51:42.452967039 +0000 UTC m=+3158.810626241" watchObservedRunningTime="2025-11-25 23:51:42.463863333 +0000 UTC m=+3158.821522445" Nov 25 23:51:42 crc kubenswrapper[5045]: I1125 23:51:42.479487 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.479459217 podStartE2EDuration="4.479459217s" podCreationTimestamp="2025-11-25 23:51:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:51:42.478582063 +0000 UTC m=+3158.836241195" watchObservedRunningTime="2025-11-25 23:51:42.479459217 +0000 UTC m=+3158.837118339" Nov 25 23:51:46 crc kubenswrapper[5045]: I1125 23:51:46.472156 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-fjsxq" event={"ID":"8b764d3f-bf9c-4407-b5d5-6f2834714d50","Type":"ContainerStarted","Data":"57a08480fcd1cae4536ecd417d818938b46a68f8d998252108e14864baac41bb"} Nov 25 23:51:46 crc kubenswrapper[5045]: I1125 23:51:46.506470 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-fjsxq" podStartSLOduration=7.750835016 podStartE2EDuration="16.506444523s" podCreationTimestamp="2025-11-25 23:51:30 +0000 UTC" firstStartedPulling="2025-11-25 23:51:36.751624419 +0000 UTC m=+3153.109283541" 
lastFinishedPulling="2025-11-25 23:51:45.507233936 +0000 UTC m=+3161.864893048" observedRunningTime="2025-11-25 23:51:46.495133628 +0000 UTC m=+3162.852792750" watchObservedRunningTime="2025-11-25 23:51:46.506444523 +0000 UTC m=+3162.864103655" Nov 25 23:51:48 crc kubenswrapper[5045]: I1125 23:51:48.882794 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 23:51:48 crc kubenswrapper[5045]: I1125 23:51:48.883190 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 23:51:48 crc kubenswrapper[5045]: I1125 23:51:48.897768 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:48 crc kubenswrapper[5045]: I1125 23:51:48.898153 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:48 crc kubenswrapper[5045]: I1125 23:51:48.936273 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 23:51:48 crc kubenswrapper[5045]: I1125 23:51:48.951022 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:48 crc kubenswrapper[5045]: I1125 23:51:48.952817 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 23:51:48 crc kubenswrapper[5045]: I1125 23:51:48.962152 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:49 crc kubenswrapper[5045]: I1125 23:51:49.503075 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:49 crc kubenswrapper[5045]: I1125 23:51:49.503145 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:49 crc kubenswrapper[5045]: I1125 23:51:49.503168 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 23:51:49 crc kubenswrapper[5045]: I1125 23:51:49.503189 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 23:51:53 crc kubenswrapper[5045]: I1125 23:51:53.024929 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:53 crc kubenswrapper[5045]: I1125 23:51:53.025589 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 23:51:53 crc kubenswrapper[5045]: I1125 23:51:53.032227 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 23:51:53 crc kubenswrapper[5045]: I1125 23:51:53.032348 5045 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 23:51:53 crc kubenswrapper[5045]: I1125 23:51:53.037418 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 23:51:53 crc kubenswrapper[5045]: I1125 23:51:53.397024 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:51:53 crc kubenswrapper[5045]: E1125 23:51:53.397378 5045 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:51:55 crc kubenswrapper[5045]: I1125 23:51:55.562051 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-747467cb48-48fv7" event={"ID":"776ddfbe-2357-4e0e-a89e-2d82c43f4212","Type":"ContainerStarted","Data":"2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8"} Nov 25 23:51:55 crc kubenswrapper[5045]: I1125 23:51:55.562641 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-747467cb48-48fv7" event={"ID":"776ddfbe-2357-4e0e-a89e-2d82c43f4212","Type":"ContainerStarted","Data":"2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f"} Nov 25 23:51:55 crc kubenswrapper[5045]: I1125 23:51:55.599934 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-747467cb48-48fv7" podStartSLOduration=2.195651449 podStartE2EDuration="33.599913712s" podCreationTimestamp="2025-11-25 23:51:22 +0000 UTC" firstStartedPulling="2025-11-25 23:51:23.53004121 +0000 UTC m=+3139.887700322" lastFinishedPulling="2025-11-25 23:51:54.934303473 +0000 UTC m=+3171.291962585" observedRunningTime="2025-11-25 23:51:55.588188087 +0000 UTC m=+3171.945847219" watchObservedRunningTime="2025-11-25 23:51:55.599913712 +0000 UTC m=+3171.957572844" Nov 25 23:51:56 crc kubenswrapper[5045]: I1125 23:51:56.576552 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-88c75b4b-7vjc8" event={"ID":"a78ee35a-ac96-40b7-b9aa-92bdaadf339b","Type":"ContainerStarted","Data":"c897cc2e52c671d0a67ed228f48accea2bd35247093a4f31feedb7e7466ffbb9"} Nov 25 23:51:56 crc kubenswrapper[5045]: I1125 23:51:56.577019 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-88c75b4b-7vjc8" event={"ID":"a78ee35a-ac96-40b7-b9aa-92bdaadf339b","Type":"ContainerStarted","Data":"322c4219634ea44f8daca19e2446ba0edca3c7b070b77471bb0d78c4aa696770"} Nov 25 23:51:56 crc kubenswrapper[5045]: I1125 23:51:56.603991 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-88c75b4b-7vjc8" podStartSLOduration=-9223372002.250801 podStartE2EDuration="34.603974393s" podCreationTimestamp="2025-11-25 23:51:22 +0000 UTC" firstStartedPulling="2025-11-25 23:51:23.65889486 +0000 UTC m=+3140.016553972" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:51:56.602011179 +0000 UTC m=+3172.959670301" watchObservedRunningTime="2025-11-25 23:51:56.603974393 +0000 UTC m=+3172.961633515" Nov 25 23:51:59 crc kubenswrapper[5045]: I1125 23:51:59.638187 5045 generic.go:334] "Generic (PLEG): container finished" podID="8b764d3f-bf9c-4407-b5d5-6f2834714d50" containerID="57a08480fcd1cae4536ecd417d818938b46a68f8d998252108e14864baac41bb" exitCode=0 Nov 25 23:51:59 crc kubenswrapper[5045]: I1125 23:51:59.638331 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-fjsxq" event={"ID":"8b764d3f-bf9c-4407-b5d5-6f2834714d50","Type":"ContainerDied","Data":"57a08480fcd1cae4536ecd417d818938b46a68f8d998252108e14864baac41bb"} Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.052898 5045 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/manila-db-sync-fjsxq" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.171834 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-combined-ca-bundle\") pod \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.172163 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-config-data\") pod \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.173029 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n79rb\" (UniqueName: \"kubernetes.io/projected/8b764d3f-bf9c-4407-b5d5-6f2834714d50-kube-api-access-n79rb\") pod \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.173445 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-job-config-data\") pod \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\" (UID: \"8b764d3f-bf9c-4407-b5d5-6f2834714d50\") " Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.177872 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "8b764d3f-bf9c-4407-b5d5-6f2834714d50" (UID: "8b764d3f-bf9c-4407-b5d5-6f2834714d50"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.177988 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b764d3f-bf9c-4407-b5d5-6f2834714d50-kube-api-access-n79rb" (OuterVolumeSpecName: "kube-api-access-n79rb") pod "8b764d3f-bf9c-4407-b5d5-6f2834714d50" (UID: "8b764d3f-bf9c-4407-b5d5-6f2834714d50"). InnerVolumeSpecName "kube-api-access-n79rb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.199096 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-config-data" (OuterVolumeSpecName: "config-data") pod "8b764d3f-bf9c-4407-b5d5-6f2834714d50" (UID: "8b764d3f-bf9c-4407-b5d5-6f2834714d50"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.210621 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b764d3f-bf9c-4407-b5d5-6f2834714d50" (UID: "8b764d3f-bf9c-4407-b5d5-6f2834714d50"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.277102 5045 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.277128 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.277139 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b764d3f-bf9c-4407-b5d5-6f2834714d50-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.277148 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n79rb\" (UniqueName: \"kubernetes.io/projected/8b764d3f-bf9c-4407-b5d5-6f2834714d50-kube-api-access-n79rb\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.660936 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-fjsxq" event={"ID":"8b764d3f-bf9c-4407-b5d5-6f2834714d50","Type":"ContainerDied","Data":"0217bc4620f842cef5d2676cdb54780507b82bf8fecfcb2d9223843e6cc7a094"} Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.661079 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0217bc4620f842cef5d2676cdb54780507b82bf8fecfcb2d9223843e6cc7a094" Nov 25 23:52:01 crc kubenswrapper[5045]: I1125 23:52:01.661027 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-fjsxq" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.527056 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:02 crc kubenswrapper[5045]: E1125 23:52:02.527439 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b764d3f-bf9c-4407-b5d5-6f2834714d50" containerName="manila-db-sync" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.527450 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b764d3f-bf9c-4407-b5d5-6f2834714d50" containerName="manila-db-sync" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.527664 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b764d3f-bf9c-4407-b5d5-6f2834714d50" containerName="manila-db-sync" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.528583 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.541659 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.541880 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.542016 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-jrkhj" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.542185 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.555185 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.556689 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.560047 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.571706 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.600150 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.701622 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.703384 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.707665 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/74380024-104d-416f-a8e6-a6c13b16950f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.707696 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.707986 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708033 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708052 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708078 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-scripts\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708099 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz5jp\" (UniqueName: \"kubernetes.io/projected/74380024-104d-416f-a8e6-a6c13b16950f-kube-api-access-nz5jp\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708123 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708150 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " 
pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708172 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708198 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708226 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-ceph\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708257 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh6vw\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-kube-api-access-kh6vw\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.708276 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-scripts\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.710477 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.717534 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-4xxtm"] Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.719065 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.741790 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.760786 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-4xxtm"] Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.812777 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.812826 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.812866 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-scripts\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813071 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzc66\" (UniqueName: \"kubernetes.io/projected/811edefd-608a-445b-97fc-911959f84ff2-kube-api-access-dzc66\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813092 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz5jp\" (UniqueName: \"kubernetes.io/projected/74380024-104d-416f-a8e6-a6c13b16950f-kube-api-access-nz5jp\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813114 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-scripts\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813142 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813166 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813190 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/811edefd-608a-445b-97fc-911959f84ff2-logs\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813217 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data-custom\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813231 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813243 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813322 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813382 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813414 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813462 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-ceph\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813571 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh6vw\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-kube-api-access-kh6vw\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813627 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-scripts\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " 
pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813674 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813704 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4crrn\" (UniqueName: \"kubernetes.io/projected/1fefc039-452f-4d2f-a4f8-2e73833e05f6-kube-api-access-4crrn\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813811 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-config\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813834 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/74380024-104d-416f-a8e6-a6c13b16950f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813851 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813891 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813918 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/811edefd-608a-445b-97fc-911959f84ff2-etc-machine-id\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.813995 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.814000 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.814015 5045 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.814060 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/74380024-104d-416f-a8e6-a6c13b16950f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.814431 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.822342 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-scripts\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.824447 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-ceph\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.826541 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.826907 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.828242 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.829056 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-scripts\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.829779 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " 
pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.839558 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.844248 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz5jp\" (UniqueName: \"kubernetes.io/projected/74380024-104d-416f-a8e6-a6c13b16950f-kube-api-access-nz5jp\") pod \"manila-scheduler-0\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.844387 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.848419 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh6vw\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-kube-api-access-kh6vw\") pod \"manila-share-share1-0\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.860240 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.887285 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916651 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916693 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4crrn\" (UniqueName: \"kubernetes.io/projected/1fefc039-452f-4d2f-a4f8-2e73833e05f6-kube-api-access-4crrn\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916767 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-config\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916789 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916813 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/811edefd-608a-445b-97fc-911959f84ff2-etc-machine-id\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916843 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916872 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916900 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzc66\" (UniqueName: \"kubernetes.io/projected/811edefd-608a-445b-97fc-911959f84ff2-kube-api-access-dzc66\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916918 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-scripts\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916939 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916961 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/811edefd-608a-445b-97fc-911959f84ff2-logs\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.916980 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data-custom\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.917182 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.918427 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.919319 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.919491 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-config\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.920114 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.920311 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/1fefc039-452f-4d2f-a4f8-2e73833e05f6-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.920583 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/811edefd-608a-445b-97fc-911959f84ff2-logs\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc 
kubenswrapper[5045]: I1125 23:52:02.920631 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/811edefd-608a-445b-97fc-911959f84ff2-etc-machine-id\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.926817 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.927163 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-scripts\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.927412 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data-custom\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.933501 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.937445 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4crrn\" (UniqueName: \"kubernetes.io/projected/1fefc039-452f-4d2f-a4f8-2e73833e05f6-kube-api-access-4crrn\") pod \"dnsmasq-dns-69655fd4bf-4xxtm\" (UID: \"1fefc039-452f-4d2f-a4f8-2e73833e05f6\") " pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:02 crc kubenswrapper[5045]: I1125 23:52:02.937872 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzc66\" (UniqueName: \"kubernetes.io/projected/811edefd-608a-445b-97fc-911959f84ff2-kube-api-access-dzc66\") pod \"manila-api-0\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " pod="openstack/manila-api-0" Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.009105 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.009819 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.037322 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.045325 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.067875 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.068462 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.472218 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.638866 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.691451 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"74380024-104d-416f-a8e6-a6c13b16950f","Type":"ContainerStarted","Data":"f9e2d1543472749699e8ffd9c9cd43f671539288588b08301e9c4e6ef28224cf"} Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.692564 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"bfa1937a-1752-4377-adaa-651a09c43cc9","Type":"ContainerStarted","Data":"8cc8511f1b98788f3694b57c94b99e75076366c5adfff5fbf6fa89d9faf03e43"} Nov 25 23:52:03 crc kubenswrapper[5045]: W1125 23:52:03.723845 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1fefc039_452f_4d2f_a4f8_2e73833e05f6.slice/crio-f6561dfbc3bdd2de1cbea90ce48baf39f90f2bce8de36fb8ab02f9a7029a6abf WatchSource:0}: Error finding container f6561dfbc3bdd2de1cbea90ce48baf39f90f2bce8de36fb8ab02f9a7029a6abf: Status 404 returned error can't find the container with id f6561dfbc3bdd2de1cbea90ce48baf39f90f2bce8de36fb8ab02f9a7029a6abf Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.723860 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-4xxtm"] Nov 25 23:52:03 crc kubenswrapper[5045]: W1125 23:52:03.725687 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod811edefd_608a_445b_97fc_911959f84ff2.slice/crio-00f5eb8438eb1af66e2e64ee7494b6118f7125fada530b648aa2442c7799f827 WatchSource:0}: Error finding container 00f5eb8438eb1af66e2e64ee7494b6118f7125fada530b648aa2442c7799f827: Status 404 returned error can't find the container with id 00f5eb8438eb1af66e2e64ee7494b6118f7125fada530b648aa2442c7799f827 Nov 25 23:52:03 crc kubenswrapper[5045]: I1125 23:52:03.732787 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:04 crc kubenswrapper[5045]: I1125 23:52:04.701610 5045 generic.go:334] "Generic (PLEG): container finished" podID="1fefc039-452f-4d2f-a4f8-2e73833e05f6" containerID="9976fdf5c8b3838a93b1a5b9ccd09309cffc206a5afb92a0412eaa79d0f29fea" exitCode=0 Nov 25 23:52:04 crc kubenswrapper[5045]: I1125 23:52:04.702908 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" event={"ID":"1fefc039-452f-4d2f-a4f8-2e73833e05f6","Type":"ContainerDied","Data":"9976fdf5c8b3838a93b1a5b9ccd09309cffc206a5afb92a0412eaa79d0f29fea"} Nov 25 23:52:04 crc kubenswrapper[5045]: I1125 23:52:04.702938 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" 
event={"ID":"1fefc039-452f-4d2f-a4f8-2e73833e05f6","Type":"ContainerStarted","Data":"f6561dfbc3bdd2de1cbea90ce48baf39f90f2bce8de36fb8ab02f9a7029a6abf"} Nov 25 23:52:04 crc kubenswrapper[5045]: I1125 23:52:04.709222 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"811edefd-608a-445b-97fc-911959f84ff2","Type":"ContainerStarted","Data":"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e"} Nov 25 23:52:04 crc kubenswrapper[5045]: I1125 23:52:04.709263 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"811edefd-608a-445b-97fc-911959f84ff2","Type":"ContainerStarted","Data":"00f5eb8438eb1af66e2e64ee7494b6118f7125fada530b648aa2442c7799f827"} Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.068746 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.396452 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:52:05 crc kubenswrapper[5045]: E1125 23:52:05.396775 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.722941 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"811edefd-608a-445b-97fc-911959f84ff2","Type":"ContainerStarted","Data":"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223"} Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.723244 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.725947 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"74380024-104d-416f-a8e6-a6c13b16950f","Type":"ContainerStarted","Data":"732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7"} Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.725988 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"74380024-104d-416f-a8e6-a6c13b16950f","Type":"ContainerStarted","Data":"55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358"} Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.728129 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" event={"ID":"1fefc039-452f-4d2f-a4f8-2e73833e05f6","Type":"ContainerStarted","Data":"5f556539a9f992854e5c42ff2b25f9e127815b130fb13bddf086d1315259d6ea"} Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.728352 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.743966 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=3.743950135 podStartE2EDuration="3.743950135s" podCreationTimestamp="2025-11-25 23:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 
23:52:05.737367673 +0000 UTC m=+3182.095026785" watchObservedRunningTime="2025-11-25 23:52:05.743950135 +0000 UTC m=+3182.101609247" Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.787287 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.09727273 podStartE2EDuration="3.787260885s" podCreationTimestamp="2025-11-25 23:52:02 +0000 UTC" firstStartedPulling="2025-11-25 23:52:03.479572528 +0000 UTC m=+3179.837231640" lastFinishedPulling="2025-11-25 23:52:04.169560643 +0000 UTC m=+3180.527219795" observedRunningTime="2025-11-25 23:52:05.777682949 +0000 UTC m=+3182.135342071" watchObservedRunningTime="2025-11-25 23:52:05.787260885 +0000 UTC m=+3182.144920007" Nov 25 23:52:05 crc kubenswrapper[5045]: I1125 23:52:05.794274 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" podStartSLOduration=3.794253558 podStartE2EDuration="3.794253558s" podCreationTimestamp="2025-11-25 23:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:52:05.7618264 +0000 UTC m=+3182.119485532" watchObservedRunningTime="2025-11-25 23:52:05.794253558 +0000 UTC m=+3182.151912670" Nov 25 23:52:06 crc kubenswrapper[5045]: I1125 23:52:06.736916 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="811edefd-608a-445b-97fc-911959f84ff2" containerName="manila-api-log" containerID="cri-o://e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e" gracePeriod=30 Nov 25 23:52:06 crc kubenswrapper[5045]: I1125 23:52:06.736978 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="811edefd-608a-445b-97fc-911959f84ff2" containerName="manila-api" containerID="cri-o://84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223" gracePeriod=30 Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.391507 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.465556 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-scripts\") pod \"811edefd-608a-445b-97fc-911959f84ff2\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.466041 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/811edefd-608a-445b-97fc-911959f84ff2-logs\") pod \"811edefd-608a-445b-97fc-911959f84ff2\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.466410 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data\") pod \"811edefd-608a-445b-97fc-911959f84ff2\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.466460 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/811edefd-608a-445b-97fc-911959f84ff2-etc-machine-id\") pod \"811edefd-608a-445b-97fc-911959f84ff2\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.466558 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzc66\" (UniqueName: \"kubernetes.io/projected/811edefd-608a-445b-97fc-911959f84ff2-kube-api-access-dzc66\") pod \"811edefd-608a-445b-97fc-911959f84ff2\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.466646 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data-custom\") pod \"811edefd-608a-445b-97fc-911959f84ff2\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.466690 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-combined-ca-bundle\") pod \"811edefd-608a-445b-97fc-911959f84ff2\" (UID: \"811edefd-608a-445b-97fc-911959f84ff2\") " Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.468046 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/811edefd-608a-445b-97fc-911959f84ff2-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "811edefd-608a-445b-97fc-911959f84ff2" (UID: "811edefd-608a-445b-97fc-911959f84ff2"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.468296 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/811edefd-608a-445b-97fc-911959f84ff2-logs" (OuterVolumeSpecName: "logs") pod "811edefd-608a-445b-97fc-911959f84ff2" (UID: "811edefd-608a-445b-97fc-911959f84ff2"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.468689 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/811edefd-608a-445b-97fc-911959f84ff2-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.468822 5045 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/811edefd-608a-445b-97fc-911959f84ff2-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.477891 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "811edefd-608a-445b-97fc-911959f84ff2" (UID: "811edefd-608a-445b-97fc-911959f84ff2"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.478403 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/811edefd-608a-445b-97fc-911959f84ff2-kube-api-access-dzc66" (OuterVolumeSpecName: "kube-api-access-dzc66") pod "811edefd-608a-445b-97fc-911959f84ff2" (UID: "811edefd-608a-445b-97fc-911959f84ff2"). InnerVolumeSpecName "kube-api-access-dzc66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.483000 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-scripts" (OuterVolumeSpecName: "scripts") pod "811edefd-608a-445b-97fc-911959f84ff2" (UID: "811edefd-608a-445b-97fc-911959f84ff2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.498804 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "811edefd-608a-445b-97fc-911959f84ff2" (UID: "811edefd-608a-445b-97fc-911959f84ff2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.556794 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data" (OuterVolumeSpecName: "config-data") pod "811edefd-608a-445b-97fc-911959f84ff2" (UID: "811edefd-608a-445b-97fc-911959f84ff2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.571295 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzc66\" (UniqueName: \"kubernetes.io/projected/811edefd-608a-445b-97fc-911959f84ff2-kube-api-access-dzc66\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.571329 5045 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.571339 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.571365 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.571375 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/811edefd-608a-445b-97fc-911959f84ff2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.750204 5045 generic.go:334] "Generic (PLEG): container finished" podID="811edefd-608a-445b-97fc-911959f84ff2" containerID="84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223" exitCode=0 Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.750242 5045 generic.go:334] "Generic (PLEG): container finished" podID="811edefd-608a-445b-97fc-911959f84ff2" containerID="e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e" exitCode=143 Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.750264 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"811edefd-608a-445b-97fc-911959f84ff2","Type":"ContainerDied","Data":"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223"} Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.750292 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"811edefd-608a-445b-97fc-911959f84ff2","Type":"ContainerDied","Data":"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e"} Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.750306 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"811edefd-608a-445b-97fc-911959f84ff2","Type":"ContainerDied","Data":"00f5eb8438eb1af66e2e64ee7494b6118f7125fada530b648aa2442c7799f827"} Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.750290 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.750320 5045 scope.go:117] "RemoveContainer" containerID="84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.778902 5045 scope.go:117] "RemoveContainer" containerID="e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.793780 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.802824 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.812549 5045 scope.go:117] "RemoveContainer" containerID="84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223" Nov 25 23:52:07 crc kubenswrapper[5045]: E1125 23:52:07.812964 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223\": container with ID starting with 84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223 not found: ID does not exist" containerID="84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.812990 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223"} err="failed to get container status \"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223\": rpc error: code = NotFound desc = could not find container \"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223\": container with ID starting with 84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223 not found: ID does not exist" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.813009 5045 scope.go:117] "RemoveContainer" containerID="e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e" Nov 25 23:52:07 crc kubenswrapper[5045]: E1125 23:52:07.813166 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e\": container with ID starting with e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e not found: ID does not exist" containerID="e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.813180 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e"} err="failed to get container status \"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e\": rpc error: code = NotFound desc = could not find container \"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e\": container with ID starting with e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e not found: ID does not exist" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.813192 5045 scope.go:117] "RemoveContainer" containerID="84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.813333 5045 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223"} err="failed to get container status \"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223\": rpc error: code = NotFound desc = could not find container \"84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223\": container with ID starting with 84738d0cc6bdc2e173e1872afcc41e51b4fcbd1bc198436b5ee929758d3a1223 not found: ID does not exist" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.813347 5045 scope.go:117] "RemoveContainer" containerID="e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.813525 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e"} err="failed to get container status \"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e\": rpc error: code = NotFound desc = could not find container \"e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e\": container with ID starting with e935c3eda180543c48a0c93f24258668ca50f2f099b51deea818263760697d1e not found: ID does not exist" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.825010 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:07 crc kubenswrapper[5045]: E1125 23:52:07.825476 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="811edefd-608a-445b-97fc-911959f84ff2" containerName="manila-api" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.825496 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="811edefd-608a-445b-97fc-911959f84ff2" containerName="manila-api" Nov 25 23:52:07 crc kubenswrapper[5045]: E1125 23:52:07.825518 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="811edefd-608a-445b-97fc-911959f84ff2" containerName="manila-api-log" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.825526 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="811edefd-608a-445b-97fc-911959f84ff2" containerName="manila-api-log" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.825739 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="811edefd-608a-445b-97fc-911959f84ff2" containerName="manila-api-log" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.825764 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="811edefd-608a-445b-97fc-911959f84ff2" containerName="manila-api" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.826800 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.829624 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.829903 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-public-svc" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.830355 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-internal-svc" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.839997 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882112 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882195 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-scripts\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882261 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-internal-tls-certs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882327 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a1b4a8a-456e-4756-8321-079731f5f729-etc-machine-id\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882361 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-config-data\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882385 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-config-data-custom\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882404 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a1b4a8a-456e-4756-8321-079731f5f729-logs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882428 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-public-tls-certs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.882447 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w28vg\" (UniqueName: \"kubernetes.io/projected/0a1b4a8a-456e-4756-8321-079731f5f729-kube-api-access-w28vg\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.907190 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.908047 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="ceilometer-central-agent" containerID="cri-o://1039a1506b9671912793cc85799339f788c85ba91d3f3702398a85865258df8a" gracePeriod=30 Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.908472 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="sg-core" containerID="cri-o://3afda70b7b37fbff4850dd32b60f45a370d53d21604b00c7803b42e31b5208c0" gracePeriod=30 Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.908437 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="proxy-httpd" containerID="cri-o://2a74d660476a94cef3409c9cb9f3be725285ae7d10f35fec7a55a171ca48fc6b" gracePeriod=30 Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.908530 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="ceilometer-notification-agent" containerID="cri-o://ded0de1a0b65711262769d68815082ef49a0345cee62a7dde51af1d293b99f89" gracePeriod=30 Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984245 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a1b4a8a-456e-4756-8321-079731f5f729-etc-machine-id\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984290 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-config-data\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984317 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-config-data-custom\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984338 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a1b4a8a-456e-4756-8321-079731f5f729-logs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc 
kubenswrapper[5045]: I1125 23:52:07.984363 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-public-tls-certs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984377 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w28vg\" (UniqueName: \"kubernetes.io/projected/0a1b4a8a-456e-4756-8321-079731f5f729-kube-api-access-w28vg\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984387 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a1b4a8a-456e-4756-8321-079731f5f729-etc-machine-id\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984470 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984583 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-scripts\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984748 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-internal-tls-certs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.984832 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a1b4a8a-456e-4756-8321-079731f5f729-logs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.989590 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-config-data-custom\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.991644 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-scripts\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.994523 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.995352 
5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-internal-tls-certs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:07 crc kubenswrapper[5045]: I1125 23:52:07.996092 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-public-tls-certs\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.001028 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w28vg\" (UniqueName: \"kubernetes.io/projected/0a1b4a8a-456e-4756-8321-079731f5f729-kube-api-access-w28vg\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.001353 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a1b4a8a-456e-4756-8321-079731f5f729-config-data\") pod \"manila-api-0\" (UID: \"0a1b4a8a-456e-4756-8321-079731f5f729\") " pod="openstack/manila-api-0" Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.150772 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.411857 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="811edefd-608a-445b-97fc-911959f84ff2" path="/var/lib/kubelet/pods/811edefd-608a-445b-97fc-911959f84ff2/volumes" Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.756464 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.766815 5045 generic.go:334] "Generic (PLEG): container finished" podID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerID="2a74d660476a94cef3409c9cb9f3be725285ae7d10f35fec7a55a171ca48fc6b" exitCode=0 Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.766850 5045 generic.go:334] "Generic (PLEG): container finished" podID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerID="3afda70b7b37fbff4850dd32b60f45a370d53d21604b00c7803b42e31b5208c0" exitCode=2 Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.766862 5045 generic.go:334] "Generic (PLEG): container finished" podID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerID="1039a1506b9671912793cc85799339f788c85ba91d3f3702398a85865258df8a" exitCode=0 Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.766906 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerDied","Data":"2a74d660476a94cef3409c9cb9f3be725285ae7d10f35fec7a55a171ca48fc6b"} Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.766935 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerDied","Data":"3afda70b7b37fbff4850dd32b60f45a370d53d21604b00c7803b42e31b5208c0"} Nov 25 23:52:08 crc kubenswrapper[5045]: I1125 23:52:08.766948 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerDied","Data":"1039a1506b9671912793cc85799339f788c85ba91d3f3702398a85865258df8a"} Nov 25 23:52:10 crc kubenswrapper[5045]: I1125 23:52:10.789806 5045 generic.go:334] "Generic (PLEG): container finished" podID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerID="ded0de1a0b65711262769d68815082ef49a0345cee62a7dde51af1d293b99f89" exitCode=0 Nov 25 23:52:10 crc kubenswrapper[5045]: I1125 23:52:10.789882 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerDied","Data":"ded0de1a0b65711262769d68815082ef49a0345cee62a7dde51af1d293b99f89"} Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.344795 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.378372 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-sg-core-conf-yaml\") pod \"77e5f87c-d414-4533-89b6-2455d46a6bd2\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.378433 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-run-httpd\") pod \"77e5f87c-d414-4533-89b6-2455d46a6bd2\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.378529 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-scripts\") pod \"77e5f87c-d414-4533-89b6-2455d46a6bd2\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.378576 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-ceilometer-tls-certs\") pod \"77e5f87c-d414-4533-89b6-2455d46a6bd2\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.378618 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-combined-ca-bundle\") pod \"77e5f87c-d414-4533-89b6-2455d46a6bd2\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.379170 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "77e5f87c-d414-4533-89b6-2455d46a6bd2" (UID: "77e5f87c-d414-4533-89b6-2455d46a6bd2"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.380965 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-config-data\") pod \"77e5f87c-d414-4533-89b6-2455d46a6bd2\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.381044 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnvf7\" (UniqueName: \"kubernetes.io/projected/77e5f87c-d414-4533-89b6-2455d46a6bd2-kube-api-access-hnvf7\") pod \"77e5f87c-d414-4533-89b6-2455d46a6bd2\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.381064 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-log-httpd\") pod \"77e5f87c-d414-4533-89b6-2455d46a6bd2\" (UID: \"77e5f87c-d414-4533-89b6-2455d46a6bd2\") " Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.381664 5045 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.384892 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "77e5f87c-d414-4533-89b6-2455d46a6bd2" (UID: "77e5f87c-d414-4533-89b6-2455d46a6bd2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.389130 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-scripts" (OuterVolumeSpecName: "scripts") pod "77e5f87c-d414-4533-89b6-2455d46a6bd2" (UID: "77e5f87c-d414-4533-89b6-2455d46a6bd2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.397223 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77e5f87c-d414-4533-89b6-2455d46a6bd2-kube-api-access-hnvf7" (OuterVolumeSpecName: "kube-api-access-hnvf7") pod "77e5f87c-d414-4533-89b6-2455d46a6bd2" (UID: "77e5f87c-d414-4533-89b6-2455d46a6bd2"). InnerVolumeSpecName "kube-api-access-hnvf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.466851 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "77e5f87c-d414-4533-89b6-2455d46a6bd2" (UID: "77e5f87c-d414-4533-89b6-2455d46a6bd2"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.468620 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "77e5f87c-d414-4533-89b6-2455d46a6bd2" (UID: "77e5f87c-d414-4533-89b6-2455d46a6bd2"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.488142 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnvf7\" (UniqueName: \"kubernetes.io/projected/77e5f87c-d414-4533-89b6-2455d46a6bd2-kube-api-access-hnvf7\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.488167 5045 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77e5f87c-d414-4533-89b6-2455d46a6bd2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.488235 5045 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.488245 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.488254 5045 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.514823 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77e5f87c-d414-4533-89b6-2455d46a6bd2" (UID: "77e5f87c-d414-4533-89b6-2455d46a6bd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.585402 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-config-data" (OuterVolumeSpecName: "config-data") pod "77e5f87c-d414-4533-89b6-2455d46a6bd2" (UID: "77e5f87c-d414-4533-89b6-2455d46a6bd2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.591628 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.591661 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77e5f87c-d414-4533-89b6-2455d46a6bd2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.831054 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"0a1b4a8a-456e-4756-8321-079731f5f729","Type":"ContainerStarted","Data":"80d5d38b18739b6cc4169e03d16d2d2cbb7db52c43f815a5cd8e547ad27605d0"} Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.831567 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"0a1b4a8a-456e-4756-8321-079731f5f729","Type":"ContainerStarted","Data":"a8bd7cb51b2494bf133f336fe2275e05ea024a315fef8e67b627503be0191186"} Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.846173 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"bfa1937a-1752-4377-adaa-651a09c43cc9","Type":"ContainerStarted","Data":"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb"} Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.862909 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.866830 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77e5f87c-d414-4533-89b6-2455d46a6bd2","Type":"ContainerDied","Data":"eced0398bf8acc4f1e29baec55be4a20340c2faeb5ba20b9f040b197a7824a81"} Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.866863 5045 scope.go:117] "RemoveContainer" containerID="2a74d660476a94cef3409c9cb9f3be725285ae7d10f35fec7a55a171ca48fc6b" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.866970 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.897699 5045 scope.go:117] "RemoveContainer" containerID="3afda70b7b37fbff4850dd32b60f45a370d53d21604b00c7803b42e31b5208c0" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.904918 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.921556 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.928737 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:12 crc kubenswrapper[5045]: E1125 23:52:12.929104 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="ceilometer-central-agent" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.929115 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="ceilometer-central-agent" Nov 25 23:52:12 crc kubenswrapper[5045]: E1125 23:52:12.929128 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="proxy-httpd" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.929134 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="proxy-httpd" Nov 25 23:52:12 crc kubenswrapper[5045]: E1125 23:52:12.929146 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="ceilometer-notification-agent" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.929153 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="ceilometer-notification-agent" Nov 25 23:52:12 crc kubenswrapper[5045]: E1125 23:52:12.929173 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="sg-core" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.929179 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="sg-core" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.929330 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="ceilometer-central-agent" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.929348 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="sg-core" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.929354 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="ceilometer-notification-agent" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.929365 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" containerName="proxy-httpd" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.930960 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.938162 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.938391 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.939056 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.947670 5045 scope.go:117] "RemoveContainer" containerID="ded0de1a0b65711262769d68815082ef49a0345cee62a7dde51af1d293b99f89" Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.970510 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:12 crc kubenswrapper[5045]: I1125 23:52:12.996448 5045 scope.go:117] "RemoveContainer" containerID="1039a1506b9671912793cc85799339f788c85ba91d3f3702398a85865258df8a" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.000697 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.000766 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-config-data\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.000956 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.000993 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-scripts\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.001137 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-log-httpd\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.001286 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdglw\" (UniqueName: \"kubernetes.io/projected/bf57f38e-f54e-45b4-a6da-45b730478d35-kube-api-access-fdglw\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.001404 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.001527 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-run-httpd\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.016792 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-747467cb48-48fv7" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.241:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.241:8443: connect: connection refused" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.047868 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-69655fd4bf-4xxtm" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.075930 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-88c75b4b-7vjc8" podUID="a78ee35a-ac96-40b7-b9aa-92bdaadf339b" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.096863 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-c924c"] Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.097280 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" podUID="760e1230-bac9-431a-9ee5-ead3870b87e7" containerName="dnsmasq-dns" containerID="cri-o://f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608" gracePeriod=10 Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.103216 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdglw\" (UniqueName: \"kubernetes.io/projected/bf57f38e-f54e-45b4-a6da-45b730478d35-kube-api-access-fdglw\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.103373 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.103476 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-run-httpd\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.103574 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.103682 5045 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-config-data\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.103841 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.103926 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-scripts\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.104035 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-log-httpd\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.105139 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-run-httpd\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.105974 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-log-httpd\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.112364 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.127551 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.136752 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.140885 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdglw\" (UniqueName: \"kubernetes.io/projected/bf57f38e-f54e-45b4-a6da-45b730478d35-kube-api-access-fdglw\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.147985 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-config-data\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.170451 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-scripts\") pod \"ceilometer-0\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.256953 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.734380 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.825287 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-config\") pod \"760e1230-bac9-431a-9ee5-ead3870b87e7\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.825588 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-sb\") pod \"760e1230-bac9-431a-9ee5-ead3870b87e7\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.825783 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-dns-svc\") pod \"760e1230-bac9-431a-9ee5-ead3870b87e7\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.825812 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-nb\") pod \"760e1230-bac9-431a-9ee5-ead3870b87e7\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.825837 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-openstack-edpm-ipam\") pod \"760e1230-bac9-431a-9ee5-ead3870b87e7\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.825911 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sw7jv\" (UniqueName: \"kubernetes.io/projected/760e1230-bac9-431a-9ee5-ead3870b87e7-kube-api-access-sw7jv\") pod \"760e1230-bac9-431a-9ee5-ead3870b87e7\" (UID: \"760e1230-bac9-431a-9ee5-ead3870b87e7\") " Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.833895 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/760e1230-bac9-431a-9ee5-ead3870b87e7-kube-api-access-sw7jv" (OuterVolumeSpecName: "kube-api-access-sw7jv") pod "760e1230-bac9-431a-9ee5-ead3870b87e7" (UID: "760e1230-bac9-431a-9ee5-ead3870b87e7"). InnerVolumeSpecName "kube-api-access-sw7jv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.896476 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "760e1230-bac9-431a-9ee5-ead3870b87e7" (UID: "760e1230-bac9-431a-9ee5-ead3870b87e7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.911677 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "760e1230-bac9-431a-9ee5-ead3870b87e7" (UID: "760e1230-bac9-431a-9ee5-ead3870b87e7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.915766 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "760e1230-bac9-431a-9ee5-ead3870b87e7" (UID: "760e1230-bac9-431a-9ee5-ead3870b87e7"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.917178 5045 generic.go:334] "Generic (PLEG): container finished" podID="760e1230-bac9-431a-9ee5-ead3870b87e7" containerID="f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608" exitCode=0 Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.917231 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" event={"ID":"760e1230-bac9-431a-9ee5-ead3870b87e7","Type":"ContainerDied","Data":"f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608"} Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.917254 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" event={"ID":"760e1230-bac9-431a-9ee5-ead3870b87e7","Type":"ContainerDied","Data":"ba10a8e43538fc54158a1b71bc8f96f4a5388ed9f1d49997aab7d31c02537678"} Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.917271 5045 scope.go:117] "RemoveContainer" containerID="f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.917355 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-c924c" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.928171 5045 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.928191 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.928203 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sw7jv\" (UniqueName: \"kubernetes.io/projected/760e1230-bac9-431a-9ee5-ead3870b87e7-kube-api-access-sw7jv\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.928212 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.936649 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"0a1b4a8a-456e-4756-8321-079731f5f729","Type":"ContainerStarted","Data":"8822e4f9b7cc8c97deab72f06a812f51c07180760b32581593124baccd702e60"} Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.937938 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.939507 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"bfa1937a-1752-4377-adaa-651a09c43cc9","Type":"ContainerStarted","Data":"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66"} Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.947328 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.949270 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-config" (OuterVolumeSpecName: "config") pod "760e1230-bac9-431a-9ee5-ead3870b87e7" (UID: "760e1230-bac9-431a-9ee5-ead3870b87e7"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.954368 5045 scope.go:117] "RemoveContainer" containerID="7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.971740 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=6.971700578 podStartE2EDuration="6.971700578s" podCreationTimestamp="2025-11-25 23:52:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:52:13.959641195 +0000 UTC m=+3190.317300297" watchObservedRunningTime="2025-11-25 23:52:13.971700578 +0000 UTC m=+3190.329359690" Nov 25 23:52:13 crc kubenswrapper[5045]: I1125 23:52:13.984186 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "760e1230-bac9-431a-9ee5-ead3870b87e7" (UID: "760e1230-bac9-431a-9ee5-ead3870b87e7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:13.999234 5045 scope.go:117] "RemoveContainer" containerID="f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608" Nov 25 23:52:14 crc kubenswrapper[5045]: E1125 23:52:13.999509 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608\": container with ID starting with f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608 not found: ID does not exist" containerID="f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:13.999535 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608"} err="failed to get container status \"f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608\": rpc error: code = NotFound desc = could not find container \"f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608\": container with ID starting with f0fdbd4cc42246670e1fc873f5a6be88ea935d4c932b0463bd8de0940b676608 not found: ID does not exist" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:13.999554 5045 scope.go:117] "RemoveContainer" containerID="7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11" Nov 25 23:52:14 crc kubenswrapper[5045]: E1125 23:52:13.999704 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11\": container with ID starting with 7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11 not found: ID does not exist" containerID="7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:13.999772 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11"} err="failed to get container status \"7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11\": rpc error: code = NotFound desc = could not find container 
\"7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11\": container with ID starting with 7f968466cc8c490ea910978d4e8e883df6287949d7f035dcb89586cb8a833c11 not found: ID does not exist" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.002178 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.478093104 podStartE2EDuration="12.002155362s" podCreationTimestamp="2025-11-25 23:52:02 +0000 UTC" firstStartedPulling="2025-11-25 23:52:03.638264282 +0000 UTC m=+3179.995923394" lastFinishedPulling="2025-11-25 23:52:12.16232652 +0000 UTC m=+3188.519985652" observedRunningTime="2025-11-25 23:52:13.982337563 +0000 UTC m=+3190.339996675" watchObservedRunningTime="2025-11-25 23:52:14.002155362 +0000 UTC m=+3190.359814474" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.030281 5045 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.030319 5045 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/760e1230-bac9-431a-9ee5-ead3870b87e7-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.260472 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-c924c"] Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.270280 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-c924c"] Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.409859 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="760e1230-bac9-431a-9ee5-ead3870b87e7" path="/var/lib/kubelet/pods/760e1230-bac9-431a-9ee5-ead3870b87e7/volumes" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.411353 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77e5f87c-d414-4533-89b6-2455d46a6bd2" path="/var/lib/kubelet/pods/77e5f87c-d414-4533-89b6-2455d46a6bd2/volumes" Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.967202 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerStarted","Data":"ea250ff29b0a9db2b33e04f1bbd50d537cd262c4c088966f5994f8f0c3964815"} Nov 25 23:52:14 crc kubenswrapper[5045]: I1125 23:52:14.967464 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerStarted","Data":"efb1bc6a72fcb281df56dc8db06afea91f42ef7868c3b0a8e813c8d95a8375ae"} Nov 25 23:52:15 crc kubenswrapper[5045]: I1125 23:52:15.981771 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerStarted","Data":"fa69b193b322c2ccf0effabaaa4f20eee95630955b8e30737e525ef2ecfcf70c"} Nov 25 23:52:16 crc kubenswrapper[5045]: I1125 23:52:16.106612 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:16 crc kubenswrapper[5045]: I1125 23:52:16.396958 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:52:16 crc kubenswrapper[5045]: E1125 23:52:16.397210 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:52:16 crc kubenswrapper[5045]: I1125 23:52:16.997245 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerStarted","Data":"4bc90aab59bab006e0e91721fa33266bd7bc180e58d9064bcaba652e19229bf7"} Nov 25 23:52:19 crc kubenswrapper[5045]: I1125 23:52:19.026491 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerStarted","Data":"95386e79d050ad4cd4198148ca1757834d7a7f2a29a32fd13203389bf0a36e02"} Nov 25 23:52:19 crc kubenswrapper[5045]: I1125 23:52:19.027313 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="ceilometer-central-agent" containerID="cri-o://ea250ff29b0a9db2b33e04f1bbd50d537cd262c4c088966f5994f8f0c3964815" gracePeriod=30 Nov 25 23:52:19 crc kubenswrapper[5045]: I1125 23:52:19.027806 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 23:52:19 crc kubenswrapper[5045]: I1125 23:52:19.028075 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="proxy-httpd" containerID="cri-o://95386e79d050ad4cd4198148ca1757834d7a7f2a29a32fd13203389bf0a36e02" gracePeriod=30 Nov 25 23:52:19 crc kubenswrapper[5045]: I1125 23:52:19.028124 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="sg-core" containerID="cri-o://4bc90aab59bab006e0e91721fa33266bd7bc180e58d9064bcaba652e19229bf7" gracePeriod=30 Nov 25 23:52:19 crc kubenswrapper[5045]: I1125 23:52:19.028167 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="ceilometer-notification-agent" containerID="cri-o://fa69b193b322c2ccf0effabaaa4f20eee95630955b8e30737e525ef2ecfcf70c" gracePeriod=30 Nov 25 23:52:19 crc kubenswrapper[5045]: I1125 23:52:19.063421 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.062560323 podStartE2EDuration="7.0633969s" podCreationTimestamp="2025-11-25 23:52:12 +0000 UTC" firstStartedPulling="2025-11-25 23:52:13.957386472 +0000 UTC m=+3190.315045584" lastFinishedPulling="2025-11-25 23:52:17.958223039 +0000 UTC m=+3194.315882161" observedRunningTime="2025-11-25 23:52:19.057477096 +0000 UTC m=+3195.415136208" watchObservedRunningTime="2025-11-25 23:52:19.0633969 +0000 UTC m=+3195.421056022" Nov 25 23:52:20 crc kubenswrapper[5045]: I1125 23:52:20.041595 5045 generic.go:334] "Generic (PLEG): container finished" podID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerID="95386e79d050ad4cd4198148ca1757834d7a7f2a29a32fd13203389bf0a36e02" exitCode=0 Nov 25 23:52:20 crc kubenswrapper[5045]: I1125 23:52:20.042004 5045 generic.go:334] "Generic (PLEG): container finished" podID="bf57f38e-f54e-45b4-a6da-45b730478d35" 
containerID="4bc90aab59bab006e0e91721fa33266bd7bc180e58d9064bcaba652e19229bf7" exitCode=2 Nov 25 23:52:20 crc kubenswrapper[5045]: I1125 23:52:20.042016 5045 generic.go:334] "Generic (PLEG): container finished" podID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerID="fa69b193b322c2ccf0effabaaa4f20eee95630955b8e30737e525ef2ecfcf70c" exitCode=0 Nov 25 23:52:20 crc kubenswrapper[5045]: I1125 23:52:20.041780 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerDied","Data":"95386e79d050ad4cd4198148ca1757834d7a7f2a29a32fd13203389bf0a36e02"} Nov 25 23:52:20 crc kubenswrapper[5045]: I1125 23:52:20.042051 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerDied","Data":"4bc90aab59bab006e0e91721fa33266bd7bc180e58d9064bcaba652e19229bf7"} Nov 25 23:52:20 crc kubenswrapper[5045]: I1125 23:52:20.042064 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerDied","Data":"fa69b193b322c2ccf0effabaaa4f20eee95630955b8e30737e525ef2ecfcf70c"} Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.063421 5045 generic.go:334] "Generic (PLEG): container finished" podID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerID="ea250ff29b0a9db2b33e04f1bbd50d537cd262c4c088966f5994f8f0c3964815" exitCode=0 Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.063537 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerDied","Data":"ea250ff29b0a9db2b33e04f1bbd50d537cd262c4c088966f5994f8f0c3964815"} Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.063842 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf57f38e-f54e-45b4-a6da-45b730478d35","Type":"ContainerDied","Data":"efb1bc6a72fcb281df56dc8db06afea91f42ef7868c3b0a8e813c8d95a8375ae"} Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.063887 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="efb1bc6a72fcb281df56dc8db06afea91f42ef7868c3b0a8e813c8d95a8375ae" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.114784 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.178555 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-sg-core-conf-yaml\") pod \"bf57f38e-f54e-45b4-a6da-45b730478d35\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.178652 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-config-data\") pod \"bf57f38e-f54e-45b4-a6da-45b730478d35\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.178703 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-ceilometer-tls-certs\") pod \"bf57f38e-f54e-45b4-a6da-45b730478d35\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.178829 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-combined-ca-bundle\") pod \"bf57f38e-f54e-45b4-a6da-45b730478d35\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.178911 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdglw\" (UniqueName: \"kubernetes.io/projected/bf57f38e-f54e-45b4-a6da-45b730478d35-kube-api-access-fdglw\") pod \"bf57f38e-f54e-45b4-a6da-45b730478d35\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.179008 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-log-httpd\") pod \"bf57f38e-f54e-45b4-a6da-45b730478d35\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.179162 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-run-httpd\") pod \"bf57f38e-f54e-45b4-a6da-45b730478d35\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.180143 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-scripts\") pod \"bf57f38e-f54e-45b4-a6da-45b730478d35\" (UID: \"bf57f38e-f54e-45b4-a6da-45b730478d35\") " Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.181325 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bf57f38e-f54e-45b4-a6da-45b730478d35" (UID: "bf57f38e-f54e-45b4-a6da-45b730478d35"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.184417 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bf57f38e-f54e-45b4-a6da-45b730478d35" (UID: "bf57f38e-f54e-45b4-a6da-45b730478d35"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.188384 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-scripts" (OuterVolumeSpecName: "scripts") pod "bf57f38e-f54e-45b4-a6da-45b730478d35" (UID: "bf57f38e-f54e-45b4-a6da-45b730478d35"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.190986 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf57f38e-f54e-45b4-a6da-45b730478d35-kube-api-access-fdglw" (OuterVolumeSpecName: "kube-api-access-fdglw") pod "bf57f38e-f54e-45b4-a6da-45b730478d35" (UID: "bf57f38e-f54e-45b4-a6da-45b730478d35"). InnerVolumeSpecName "kube-api-access-fdglw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.244499 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bf57f38e-f54e-45b4-a6da-45b730478d35" (UID: "bf57f38e-f54e-45b4-a6da-45b730478d35"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.274823 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "bf57f38e-f54e-45b4-a6da-45b730478d35" (UID: "bf57f38e-f54e-45b4-a6da-45b730478d35"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.283604 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.283638 5045 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.283652 5045 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.283665 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdglw\" (UniqueName: \"kubernetes.io/projected/bf57f38e-f54e-45b4-a6da-45b730478d35-kube-api-access-fdglw\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.283676 5045 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.283703 5045 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf57f38e-f54e-45b4-a6da-45b730478d35-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.309838 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf57f38e-f54e-45b4-a6da-45b730478d35" (UID: "bf57f38e-f54e-45b4-a6da-45b730478d35"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.315159 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-config-data" (OuterVolumeSpecName: "config-data") pod "bf57f38e-f54e-45b4-a6da-45b730478d35" (UID: "bf57f38e-f54e-45b4-a6da-45b730478d35"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.385908 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:21 crc kubenswrapper[5045]: I1125 23:52:21.385942 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf57f38e-f54e-45b4-a6da-45b730478d35-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.075975 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.138131 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.161524 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.176420 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:22 crc kubenswrapper[5045]: E1125 23:52:22.177265 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="760e1230-bac9-431a-9ee5-ead3870b87e7" containerName="dnsmasq-dns" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.177307 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="760e1230-bac9-431a-9ee5-ead3870b87e7" containerName="dnsmasq-dns" Nov 25 23:52:22 crc kubenswrapper[5045]: E1125 23:52:22.177343 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="ceilometer-notification-agent" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.177360 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="ceilometer-notification-agent" Nov 25 23:52:22 crc kubenswrapper[5045]: E1125 23:52:22.177399 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="proxy-httpd" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.177416 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="proxy-httpd" Nov 25 23:52:22 crc kubenswrapper[5045]: E1125 23:52:22.177458 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="ceilometer-central-agent" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.177475 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="ceilometer-central-agent" Nov 25 23:52:22 crc kubenswrapper[5045]: E1125 23:52:22.177503 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="760e1230-bac9-431a-9ee5-ead3870b87e7" containerName="init" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.177521 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="760e1230-bac9-431a-9ee5-ead3870b87e7" containerName="init" Nov 25 23:52:22 crc kubenswrapper[5045]: E1125 23:52:22.177589 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="sg-core" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.177606 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="sg-core" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.178072 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="proxy-httpd" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.178122 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="sg-core" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.178172 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="760e1230-bac9-431a-9ee5-ead3870b87e7" containerName="dnsmasq-dns" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.178203 5045 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="ceilometer-notification-agent" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.178224 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" containerName="ceilometer-central-agent" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.181620 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.184954 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.185372 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.185662 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.188030 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.311367 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-scripts\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.311445 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d111259-fd01-46df-9e3b-e25c7a05f59d-log-httpd\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.311648 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvwlh\" (UniqueName: \"kubernetes.io/projected/3d111259-fd01-46df-9e3b-e25c7a05f59d-kube-api-access-cvwlh\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.311766 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d111259-fd01-46df-9e3b-e25c7a05f59d-run-httpd\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.311996 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.312106 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.312335 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.312408 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-config-data\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.414362 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d111259-fd01-46df-9e3b-e25c7a05f59d-log-httpd\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.414420 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvwlh\" (UniqueName: \"kubernetes.io/projected/3d111259-fd01-46df-9e3b-e25c7a05f59d-kube-api-access-cvwlh\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.414452 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d111259-fd01-46df-9e3b-e25c7a05f59d-run-httpd\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.414512 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.414541 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.414592 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.414613 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-config-data\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.414648 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-scripts\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.415238 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d111259-fd01-46df-9e3b-e25c7a05f59d-log-httpd\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.415401 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d111259-fd01-46df-9e3b-e25c7a05f59d-run-httpd\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.420497 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-config-data\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.421373 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.421688 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf57f38e-f54e-45b4-a6da-45b730478d35" path="/var/lib/kubelet/pods/bf57f38e-f54e-45b4-a6da-45b730478d35/volumes" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.431372 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-scripts\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.434492 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.443786 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d111259-fd01-46df-9e3b-e25c7a05f59d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.449503 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvwlh\" (UniqueName: \"kubernetes.io/projected/3d111259-fd01-46df-9e3b-e25c7a05f59d-kube-api-access-cvwlh\") pod \"ceilometer-0\" (UID: \"3d111259-fd01-46df-9e3b-e25c7a05f59d\") " pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.512133 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 23:52:22 crc kubenswrapper[5045]: I1125 23:52:22.888682 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 23:52:23 crc kubenswrapper[5045]: I1125 23:52:23.088964 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 23:52:24 crc kubenswrapper[5045]: I1125 23:52:24.101275 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d111259-fd01-46df-9e3b-e25c7a05f59d","Type":"ContainerStarted","Data":"f2e688d0e91e2fa0b5afd2895f2c9c4af3408940ab65ae2b4bc1840783ffdc1f"} Nov 25 23:52:24 crc kubenswrapper[5045]: I1125 23:52:24.101875 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d111259-fd01-46df-9e3b-e25c7a05f59d","Type":"ContainerStarted","Data":"370cc909d56e15b22536955062e142308676896be75853f438f13ce45e850244"} Nov 25 23:52:24 crc kubenswrapper[5045]: I1125 23:52:24.357274 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 25 23:52:24 crc kubenswrapper[5045]: I1125 23:52:24.369187 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 25 23:52:24 crc kubenswrapper[5045]: I1125 23:52:24.487840 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:24 crc kubenswrapper[5045]: I1125 23:52:24.513138 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:25 crc kubenswrapper[5045]: I1125 23:52:25.116506 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d111259-fd01-46df-9e3b-e25c7a05f59d","Type":"ContainerStarted","Data":"0c9709048b496148289d3921d4c032d03554bba6e22230eb60a15772536c62e1"} Nov 25 23:52:25 crc kubenswrapper[5045]: I1125 23:52:25.116787 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="74380024-104d-416f-a8e6-a6c13b16950f" containerName="manila-scheduler" containerID="cri-o://732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7" gracePeriod=30 Nov 25 23:52:25 crc kubenswrapper[5045]: I1125 23:52:25.116911 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerName="probe" containerID="cri-o://757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66" gracePeriod=30 Nov 25 23:52:25 crc kubenswrapper[5045]: I1125 23:52:25.116795 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerName="manila-share" containerID="cri-o://9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb" gracePeriod=30 Nov 25 23:52:25 crc kubenswrapper[5045]: I1125 23:52:25.116823 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="74380024-104d-416f-a8e6-a6c13b16950f" containerName="probe" containerID="cri-o://55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358" gracePeriod=30 Nov 25 23:52:25 crc kubenswrapper[5045]: I1125 23:52:25.444115 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:52:25 crc kubenswrapper[5045]: I1125 
23:52:25.482462 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.066082 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.129780 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d111259-fd01-46df-9e3b-e25c7a05f59d","Type":"ContainerStarted","Data":"cfd2643c9a443ef5bf2c8cb7e34786a95f2ef696fe34aff2bfb12db0f234e863"} Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.132218 5045 generic.go:334] "Generic (PLEG): container finished" podID="74380024-104d-416f-a8e6-a6c13b16950f" containerID="55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358" exitCode=0 Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.132289 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"74380024-104d-416f-a8e6-a6c13b16950f","Type":"ContainerDied","Data":"55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358"} Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.134462 5045 generic.go:334] "Generic (PLEG): container finished" podID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerID="757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66" exitCode=0 Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.134529 5045 generic.go:334] "Generic (PLEG): container finished" podID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerID="9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb" exitCode=1 Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.134546 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"bfa1937a-1752-4377-adaa-651a09c43cc9","Type":"ContainerDied","Data":"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66"} Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.134563 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"bfa1937a-1752-4377-adaa-651a09c43cc9","Type":"ContainerDied","Data":"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb"} Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.134574 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"bfa1937a-1752-4377-adaa-651a09c43cc9","Type":"ContainerDied","Data":"8cc8511f1b98788f3694b57c94b99e75076366c5adfff5fbf6fa89d9faf03e43"} Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.134613 5045 scope.go:117] "RemoveContainer" containerID="757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.134786 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.161957 5045 scope.go:117] "RemoveContainer" containerID="9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.192766 5045 scope.go:117] "RemoveContainer" containerID="757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66" Nov 25 23:52:26 crc kubenswrapper[5045]: E1125 23:52:26.193445 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66\": container with ID starting with 757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66 not found: ID does not exist" containerID="757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.193480 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66"} err="failed to get container status \"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66\": rpc error: code = NotFound desc = could not find container \"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66\": container with ID starting with 757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66 not found: ID does not exist" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.193501 5045 scope.go:117] "RemoveContainer" containerID="9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb" Nov 25 23:52:26 crc kubenswrapper[5045]: E1125 23:52:26.195805 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb\": container with ID starting with 9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb not found: ID does not exist" containerID="9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.195834 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb"} err="failed to get container status \"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb\": rpc error: code = NotFound desc = could not find container \"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb\": container with ID starting with 9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb not found: ID does not exist" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.195850 5045 scope.go:117] "RemoveContainer" containerID="757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.196088 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66"} err="failed to get container status \"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66\": rpc error: code = NotFound desc = could not find container \"757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66\": container with ID starting with 757a650ae1abce5e5945bdeb7d37fdcb48c464f755c1a2f8725b410855a59a66 not found: ID does not exist" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.196108 
5045 scope.go:117] "RemoveContainer" containerID="9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.196384 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb"} err="failed to get container status \"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb\": rpc error: code = NotFound desc = could not find container \"9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb\": container with ID starting with 9a1a3831fe72bf4af335e1253281021754e4bee372839db0ca2bee17dde6b2bb not found: ID does not exist" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.240244 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-scripts\") pod \"bfa1937a-1752-4377-adaa-651a09c43cc9\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.240409 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kh6vw\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-kube-api-access-kh6vw\") pod \"bfa1937a-1752-4377-adaa-651a09c43cc9\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.240448 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-ceph\") pod \"bfa1937a-1752-4377-adaa-651a09c43cc9\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.240491 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-etc-machine-id\") pod \"bfa1937a-1752-4377-adaa-651a09c43cc9\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.240558 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data\") pod \"bfa1937a-1752-4377-adaa-651a09c43cc9\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.240583 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data-custom\") pod \"bfa1937a-1752-4377-adaa-651a09c43cc9\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.240625 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-combined-ca-bundle\") pod \"bfa1937a-1752-4377-adaa-651a09c43cc9\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.240698 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-var-lib-manila\") pod \"bfa1937a-1752-4377-adaa-651a09c43cc9\" (UID: \"bfa1937a-1752-4377-adaa-651a09c43cc9\") " Nov 25 23:52:26 crc 
kubenswrapper[5045]: I1125 23:52:26.241052 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bfa1937a-1752-4377-adaa-651a09c43cc9" (UID: "bfa1937a-1752-4377-adaa-651a09c43cc9"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.241195 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "bfa1937a-1752-4377-adaa-651a09c43cc9" (UID: "bfa1937a-1752-4377-adaa-651a09c43cc9"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.246981 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-ceph" (OuterVolumeSpecName: "ceph") pod "bfa1937a-1752-4377-adaa-651a09c43cc9" (UID: "bfa1937a-1752-4377-adaa-651a09c43cc9"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.247109 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bfa1937a-1752-4377-adaa-651a09c43cc9" (UID: "bfa1937a-1752-4377-adaa-651a09c43cc9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.247434 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-scripts" (OuterVolumeSpecName: "scripts") pod "bfa1937a-1752-4377-adaa-651a09c43cc9" (UID: "bfa1937a-1752-4377-adaa-651a09c43cc9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.249548 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-kube-api-access-kh6vw" (OuterVolumeSpecName: "kube-api-access-kh6vw") pod "bfa1937a-1752-4377-adaa-651a09c43cc9" (UID: "bfa1937a-1752-4377-adaa-651a09c43cc9"). InnerVolumeSpecName "kube-api-access-kh6vw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.311976 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bfa1937a-1752-4377-adaa-651a09c43cc9" (UID: "bfa1937a-1752-4377-adaa-651a09c43cc9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.347676 5045 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.347923 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.348006 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kh6vw\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-kube-api-access-kh6vw\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.348425 5045 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bfa1937a-1752-4377-adaa-651a09c43cc9-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.348535 5045 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfa1937a-1752-4377-adaa-651a09c43cc9-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.348698 5045 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.348789 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.348918 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data" (OuterVolumeSpecName: "config-data") pod "bfa1937a-1752-4377-adaa-651a09c43cc9" (UID: "bfa1937a-1752-4377-adaa-651a09c43cc9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.450454 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa1937a-1752-4377-adaa-651a09c43cc9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.454348 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.462835 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.473448 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:26 crc kubenswrapper[5045]: E1125 23:52:26.474037 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerName="manila-share" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.479067 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerName="manila-share" Nov 25 23:52:26 crc kubenswrapper[5045]: E1125 23:52:26.479195 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerName="probe" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.479278 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerName="probe" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.479834 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerName="manila-share" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.479943 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" containerName="probe" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.481292 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.483055 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.500038 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.653704 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-config-data\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.654024 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.654143 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-scripts\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.654251 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.654361 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-ceph\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.654448 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24tqw\" (UniqueName: \"kubernetes.io/projected/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-kube-api-access-24tqw\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.654526 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.654635 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc 
kubenswrapper[5045]: I1125 23:52:26.756686 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.757054 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-config-data\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.757091 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.757136 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-scripts\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.757189 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.757231 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-ceph\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.757253 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24tqw\" (UniqueName: \"kubernetes.io/projected/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-kube-api-access-24tqw\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.757273 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.757917 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.758006 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.762145 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-scripts\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.764848 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-ceph\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.767176 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-config-data\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.769626 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.770389 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.792465 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24tqw\" (UniqueName: \"kubernetes.io/projected/ce03c0a7-a5a4-48df-8d94-5d3c7464efc1-kube-api-access-24tqw\") pod \"manila-share-share1-0\" (UID: \"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1\") " pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.796424 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 23:52:26 crc kubenswrapper[5045]: I1125 23:52:26.983165 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:52:27 crc kubenswrapper[5045]: I1125 23:52:27.146045 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d111259-fd01-46df-9e3b-e25c7a05f59d","Type":"ContainerStarted","Data":"3b81cc1fd6345e2695fdc5ac3ff7ca744b5fd7c55e72afe10bb7d546fd2c17c5"} Nov 25 23:52:27 crc kubenswrapper[5045]: I1125 23:52:27.146272 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 23:52:27 crc kubenswrapper[5045]: I1125 23:52:27.176639 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.47641288 podStartE2EDuration="5.176616143s" podCreationTimestamp="2025-11-25 23:52:22 +0000 UTC" firstStartedPulling="2025-11-25 23:52:23.112895285 +0000 UTC m=+3199.470554397" lastFinishedPulling="2025-11-25 23:52:26.813098548 +0000 UTC m=+3203.170757660" observedRunningTime="2025-11-25 23:52:27.166233345 +0000 UTC m=+3203.523892467" watchObservedRunningTime="2025-11-25 23:52:27.176616143 +0000 UTC m=+3203.534275275" Nov 25 23:52:27 crc kubenswrapper[5045]: I1125 23:52:27.391853 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-88c75b4b-7vjc8" Nov 25 23:52:27 crc kubenswrapper[5045]: I1125 23:52:27.456590 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-747467cb48-48fv7"] Nov 25 23:52:27 crc kubenswrapper[5045]: I1125 23:52:27.456804 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-747467cb48-48fv7" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon-log" containerID="cri-o://2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f" gracePeriod=30 Nov 25 23:52:27 crc kubenswrapper[5045]: I1125 23:52:27.457162 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-747467cb48-48fv7" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon" containerID="cri-o://2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8" gracePeriod=30 Nov 25 23:52:27 crc kubenswrapper[5045]: I1125 23:52:27.487042 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 23:52:28 crc kubenswrapper[5045]: I1125 23:52:28.159130 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1","Type":"ContainerStarted","Data":"5390255aed950d691c057cffa903683ec1cb1cf2c1aa014d5ae32ee45377325b"} Nov 25 23:52:28 crc kubenswrapper[5045]: I1125 23:52:28.159652 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1","Type":"ContainerStarted","Data":"b1269af48008cab3554b2301579704225494b98cbf67ff4d8ac7f8a343692eb6"} Nov 25 23:52:28 crc kubenswrapper[5045]: I1125 23:52:28.422241 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfa1937a-1752-4377-adaa-651a09c43cc9" path="/var/lib/kubelet/pods/bfa1937a-1752-4377-adaa-651a09c43cc9/volumes" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.073640 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.168605 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"ce03c0a7-a5a4-48df-8d94-5d3c7464efc1","Type":"ContainerStarted","Data":"a361c1c076a53123b37f52070bb47c2c9400d561d52667a57b11eb2c2a5fe36e"} Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.173556 5045 generic.go:334] "Generic (PLEG): container finished" podID="74380024-104d-416f-a8e6-a6c13b16950f" containerID="732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7" exitCode=0 Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.173600 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"74380024-104d-416f-a8e6-a6c13b16950f","Type":"ContainerDied","Data":"732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7"} Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.173614 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.173627 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"74380024-104d-416f-a8e6-a6c13b16950f","Type":"ContainerDied","Data":"f9e2d1543472749699e8ffd9c9cd43f671539288588b08301e9c4e6ef28224cf"} Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.173652 5045 scope.go:117] "RemoveContainer" containerID="55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.198523 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.198503396 podStartE2EDuration="3.198503396s" podCreationTimestamp="2025-11-25 23:52:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:52:29.187502841 +0000 UTC m=+3205.545161953" watchObservedRunningTime="2025-11-25 23:52:29.198503396 +0000 UTC m=+3205.556162508" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.199942 5045 scope.go:117] "RemoveContainer" containerID="732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.223326 5045 scope.go:117] "RemoveContainer" containerID="55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.226327 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data-custom\") pod \"74380024-104d-416f-a8e6-a6c13b16950f\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.226358 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-scripts\") pod \"74380024-104d-416f-a8e6-a6c13b16950f\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.226389 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data\") pod \"74380024-104d-416f-a8e6-a6c13b16950f\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " Nov 
25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.226471 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/74380024-104d-416f-a8e6-a6c13b16950f-etc-machine-id\") pod \"74380024-104d-416f-a8e6-a6c13b16950f\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.226532 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nz5jp\" (UniqueName: \"kubernetes.io/projected/74380024-104d-416f-a8e6-a6c13b16950f-kube-api-access-nz5jp\") pod \"74380024-104d-416f-a8e6-a6c13b16950f\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.227002 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74380024-104d-416f-a8e6-a6c13b16950f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "74380024-104d-416f-a8e6-a6c13b16950f" (UID: "74380024-104d-416f-a8e6-a6c13b16950f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:52:29 crc kubenswrapper[5045]: E1125 23:52:29.227775 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358\": container with ID starting with 55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358 not found: ID does not exist" containerID="55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.227852 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358"} err="failed to get container status \"55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358\": rpc error: code = NotFound desc = could not find container \"55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358\": container with ID starting with 55c00e1ae84dee9a49b562189ae48d9393d8e11f69d5e551427a93d49dcaa358 not found: ID does not exist" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.227874 5045 scope.go:117] "RemoveContainer" containerID="732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.227949 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-combined-ca-bundle\") pod \"74380024-104d-416f-a8e6-a6c13b16950f\" (UID: \"74380024-104d-416f-a8e6-a6c13b16950f\") " Nov 25 23:52:29 crc kubenswrapper[5045]: E1125 23:52:29.229181 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7\": container with ID starting with 732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7 not found: ID does not exist" containerID="732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.229220 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7"} err="failed to get container status 
\"732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7\": rpc error: code = NotFound desc = could not find container \"732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7\": container with ID starting with 732fe7fc9e3c5937c08c09bf6894190f42661057c12d08057d5ae96a76d6d8b7 not found: ID does not exist" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.230089 5045 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/74380024-104d-416f-a8e6-a6c13b16950f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.232602 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-scripts" (OuterVolumeSpecName: "scripts") pod "74380024-104d-416f-a8e6-a6c13b16950f" (UID: "74380024-104d-416f-a8e6-a6c13b16950f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.236893 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74380024-104d-416f-a8e6-a6c13b16950f-kube-api-access-nz5jp" (OuterVolumeSpecName: "kube-api-access-nz5jp") pod "74380024-104d-416f-a8e6-a6c13b16950f" (UID: "74380024-104d-416f-a8e6-a6c13b16950f"). InnerVolumeSpecName "kube-api-access-nz5jp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.244865 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "74380024-104d-416f-a8e6-a6c13b16950f" (UID: "74380024-104d-416f-a8e6-a6c13b16950f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.290998 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74380024-104d-416f-a8e6-a6c13b16950f" (UID: "74380024-104d-416f-a8e6-a6c13b16950f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.335159 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nz5jp\" (UniqueName: \"kubernetes.io/projected/74380024-104d-416f-a8e6-a6c13b16950f-kube-api-access-nz5jp\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.335185 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.335193 5045 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.335201 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.384693 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data" (OuterVolumeSpecName: "config-data") pod "74380024-104d-416f-a8e6-a6c13b16950f" (UID: "74380024-104d-416f-a8e6-a6c13b16950f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.436960 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74380024-104d-416f-a8e6-a6c13b16950f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.478175 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.525391 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.528691 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.559913 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:29 crc kubenswrapper[5045]: E1125 23:52:29.560342 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74380024-104d-416f-a8e6-a6c13b16950f" containerName="manila-scheduler" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.560359 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="74380024-104d-416f-a8e6-a6c13b16950f" containerName="manila-scheduler" Nov 25 23:52:29 crc kubenswrapper[5045]: E1125 23:52:29.560373 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74380024-104d-416f-a8e6-a6c13b16950f" containerName="probe" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.560380 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="74380024-104d-416f-a8e6-a6c13b16950f" containerName="probe" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.560547 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="74380024-104d-416f-a8e6-a6c13b16950f" containerName="probe" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.560571 5045 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="74380024-104d-416f-a8e6-a6c13b16950f" containerName="manila-scheduler" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.562259 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.565217 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.575917 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.758226 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.758303 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-config-data\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.758377 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.758411 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8m48\" (UniqueName: \"kubernetes.io/projected/3b78dcef-2652-44a3-8d97-cb40f963d225-kube-api-access-n8m48\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.758518 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3b78dcef-2652-44a3-8d97-cb40f963d225-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.758595 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-scripts\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.860840 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3b78dcef-2652-44a3-8d97-cb40f963d225-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.860921 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-scripts\") pod \"manila-scheduler-0\" (UID: 
\"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.861000 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.861004 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3b78dcef-2652-44a3-8d97-cb40f963d225-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.861028 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-config-data\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.861130 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.861170 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8m48\" (UniqueName: \"kubernetes.io/projected/3b78dcef-2652-44a3-8d97-cb40f963d225-kube-api-access-n8m48\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.865466 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.866660 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.867542 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-scripts\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.873584 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b78dcef-2652-44a3-8d97-cb40f963d225-config-data\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.877888 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8m48\" (UniqueName: 
\"kubernetes.io/projected/3b78dcef-2652-44a3-8d97-cb40f963d225-kube-api-access-n8m48\") pod \"manila-scheduler-0\" (UID: \"3b78dcef-2652-44a3-8d97-cb40f963d225\") " pod="openstack/manila-scheduler-0" Nov 25 23:52:29 crc kubenswrapper[5045]: I1125 23:52:29.885090 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 23:52:30 crc kubenswrapper[5045]: I1125 23:52:30.365252 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 23:52:30 crc kubenswrapper[5045]: W1125 23:52:30.370564 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3b78dcef_2652_44a3_8d97_cb40f963d225.slice/crio-50d85a6f78a84874f246d034ca7ef986c6591506343c3d4b3228d59c4ab38a90 WatchSource:0}: Error finding container 50d85a6f78a84874f246d034ca7ef986c6591506343c3d4b3228d59c4ab38a90: Status 404 returned error can't find the container with id 50d85a6f78a84874f246d034ca7ef986c6591506343c3d4b3228d59c4ab38a90 Nov 25 23:52:30 crc kubenswrapper[5045]: I1125 23:52:30.398134 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:52:30 crc kubenswrapper[5045]: E1125 23:52:30.398461 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:52:30 crc kubenswrapper[5045]: I1125 23:52:30.409678 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74380024-104d-416f-a8e6-a6c13b16950f" path="/var/lib/kubelet/pods/74380024-104d-416f-a8e6-a6c13b16950f/volumes" Nov 25 23:52:31 crc kubenswrapper[5045]: I1125 23:52:31.201481 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3b78dcef-2652-44a3-8d97-cb40f963d225","Type":"ContainerStarted","Data":"b117931f4d5045e05054ecb341d1b81932890b2e0d3da43989aa465c9ed9d44e"} Nov 25 23:52:31 crc kubenswrapper[5045]: I1125 23:52:31.202024 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3b78dcef-2652-44a3-8d97-cb40f963d225","Type":"ContainerStarted","Data":"50d85a6f78a84874f246d034ca7ef986c6591506343c3d4b3228d59c4ab38a90"} Nov 25 23:52:31 crc kubenswrapper[5045]: I1125 23:52:31.204187 5045 generic.go:334] "Generic (PLEG): container finished" podID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerID="2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8" exitCode=0 Nov 25 23:52:31 crc kubenswrapper[5045]: I1125 23:52:31.204231 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-747467cb48-48fv7" event={"ID":"776ddfbe-2357-4e0e-a89e-2d82c43f4212","Type":"ContainerDied","Data":"2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8"} Nov 25 23:52:32 crc kubenswrapper[5045]: I1125 23:52:32.230945 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3b78dcef-2652-44a3-8d97-cb40f963d225","Type":"ContainerStarted","Data":"21a8cbb5c3db28a70e96fab4a0a49f086793ee1d466e69e6a6fe45c1a6f5d739"} Nov 25 23:52:32 crc kubenswrapper[5045]: I1125 23:52:32.273465 5045 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.273437365 podStartE2EDuration="3.273437365s" podCreationTimestamp="2025-11-25 23:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 23:52:32.253965696 +0000 UTC m=+3208.611624868" watchObservedRunningTime="2025-11-25 23:52:32.273437365 +0000 UTC m=+3208.631096507" Nov 25 23:52:33 crc kubenswrapper[5045]: I1125 23:52:33.006861 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-747467cb48-48fv7" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.241:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.241:8443: connect: connection refused" Nov 25 23:52:36 crc kubenswrapper[5045]: I1125 23:52:36.796877 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 23:52:39 crc kubenswrapper[5045]: I1125 23:52:39.886017 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 25 23:52:43 crc kubenswrapper[5045]: I1125 23:52:43.006842 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-747467cb48-48fv7" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.241:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.241:8443: connect: connection refused" Nov 25 23:52:45 crc kubenswrapper[5045]: I1125 23:52:45.397028 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:52:45 crc kubenswrapper[5045]: E1125 23:52:45.398237 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:52:48 crc kubenswrapper[5045]: I1125 23:52:48.273465 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 25 23:52:51 crc kubenswrapper[5045]: I1125 23:52:51.260144 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 25 23:52:52 crc kubenswrapper[5045]: I1125 23:52:52.642871 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 23:52:53 crc kubenswrapper[5045]: I1125 23:52:53.008894 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-747467cb48-48fv7" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.241:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.241:8443: connect: connection refused" Nov 25 23:52:53 crc kubenswrapper[5045]: I1125 23:52:53.009349 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:52:56 crc kubenswrapper[5045]: I1125 23:52:56.397286 5045 scope.go:117] "RemoveContainer" 
containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:52:56 crc kubenswrapper[5045]: E1125 23:52:56.399665 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:52:57 crc kubenswrapper[5045]: I1125 23:52:57.997035 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.117580 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-secret-key\") pod \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.117777 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-tls-certs\") pod \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.117851 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6654s\" (UniqueName: \"kubernetes.io/projected/776ddfbe-2357-4e0e-a89e-2d82c43f4212-kube-api-access-6654s\") pod \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.117909 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-config-data\") pod \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.117949 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-combined-ca-bundle\") pod \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.117980 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/776ddfbe-2357-4e0e-a89e-2d82c43f4212-logs\") pod \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.118052 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-scripts\") pod \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\" (UID: \"776ddfbe-2357-4e0e-a89e-2d82c43f4212\") " Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.120491 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/776ddfbe-2357-4e0e-a89e-2d82c43f4212-logs" (OuterVolumeSpecName: "logs") pod "776ddfbe-2357-4e0e-a89e-2d82c43f4212" (UID: "776ddfbe-2357-4e0e-a89e-2d82c43f4212"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.124457 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "776ddfbe-2357-4e0e-a89e-2d82c43f4212" (UID: "776ddfbe-2357-4e0e-a89e-2d82c43f4212"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.124540 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/776ddfbe-2357-4e0e-a89e-2d82c43f4212-kube-api-access-6654s" (OuterVolumeSpecName: "kube-api-access-6654s") pod "776ddfbe-2357-4e0e-a89e-2d82c43f4212" (UID: "776ddfbe-2357-4e0e-a89e-2d82c43f4212"). InnerVolumeSpecName "kube-api-access-6654s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.144931 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "776ddfbe-2357-4e0e-a89e-2d82c43f4212" (UID: "776ddfbe-2357-4e0e-a89e-2d82c43f4212"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.145204 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-config-data" (OuterVolumeSpecName: "config-data") pod "776ddfbe-2357-4e0e-a89e-2d82c43f4212" (UID: "776ddfbe-2357-4e0e-a89e-2d82c43f4212"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.155457 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-scripts" (OuterVolumeSpecName: "scripts") pod "776ddfbe-2357-4e0e-a89e-2d82c43f4212" (UID: "776ddfbe-2357-4e0e-a89e-2d82c43f4212"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.175909 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "776ddfbe-2357-4e0e-a89e-2d82c43f4212" (UID: "776ddfbe-2357-4e0e-a89e-2d82c43f4212"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.220185 5045 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.220282 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6654s\" (UniqueName: \"kubernetes.io/projected/776ddfbe-2357-4e0e-a89e-2d82c43f4212-kube-api-access-6654s\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.220296 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.220305 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.220313 5045 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/776ddfbe-2357-4e0e-a89e-2d82c43f4212-logs\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.220323 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776ddfbe-2357-4e0e-a89e-2d82c43f4212-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.220333 5045 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/776ddfbe-2357-4e0e-a89e-2d82c43f4212-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.545377 5045 generic.go:334] "Generic (PLEG): container finished" podID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerID="2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f" exitCode=137 Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.545633 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-747467cb48-48fv7" event={"ID":"776ddfbe-2357-4e0e-a89e-2d82c43f4212","Type":"ContainerDied","Data":"2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f"} Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.545667 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-747467cb48-48fv7" event={"ID":"776ddfbe-2357-4e0e-a89e-2d82c43f4212","Type":"ContainerDied","Data":"e954849daa2b4bdc36edea7f5ce6b318a3ea7393e639b3a48078e014ff34d3e7"} Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.545705 5045 scope.go:117] "RemoveContainer" containerID="2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.545947 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-747467cb48-48fv7" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.615931 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-747467cb48-48fv7"] Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.626065 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-747467cb48-48fv7"] Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.797254 5045 scope.go:117] "RemoveContainer" containerID="2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.821623 5045 scope.go:117] "RemoveContainer" containerID="2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8" Nov 25 23:52:58 crc kubenswrapper[5045]: E1125 23:52:58.822540 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8\": container with ID starting with 2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8 not found: ID does not exist" containerID="2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.822598 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8"} err="failed to get container status \"2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8\": rpc error: code = NotFound desc = could not find container \"2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8\": container with ID starting with 2cdb23388ba43fd01d1a47a0b8ea0d232d690d291e71b31e1ab3f0b5b14f72e8 not found: ID does not exist" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.822632 5045 scope.go:117] "RemoveContainer" containerID="2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f" Nov 25 23:52:58 crc kubenswrapper[5045]: E1125 23:52:58.823231 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f\": container with ID starting with 2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f not found: ID does not exist" containerID="2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f" Nov 25 23:52:58 crc kubenswrapper[5045]: I1125 23:52:58.823308 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f"} err="failed to get container status \"2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f\": rpc error: code = NotFound desc = could not find container \"2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f\": container with ID starting with 2d5e561c81f6531612a014b9520341369e949776b29c85765f231ed2088d955f not found: ID does not exist" Nov 25 23:53:00 crc kubenswrapper[5045]: I1125 23:53:00.414389 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" path="/var/lib/kubelet/pods/776ddfbe-2357-4e0e-a89e-2d82c43f4212/volumes" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.820281 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4pvrw"] Nov 25 23:53:07 crc kubenswrapper[5045]: E1125 23:53:07.821620 5045 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.821644 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon" Nov 25 23:53:07 crc kubenswrapper[5045]: E1125 23:53:07.821686 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon-log" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.821698 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon-log" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.822056 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.822087 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="776ddfbe-2357-4e0e-a89e-2d82c43f4212" containerName="horizon-log" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.824678 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.849626 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4pvrw"] Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.871364 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x84m\" (UniqueName: \"kubernetes.io/projected/813c11be-9668-446f-9c2d-a1f84eb0ef50-kube-api-access-5x84m\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.871964 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-catalog-content\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.872043 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-utilities\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.973604 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x84m\" (UniqueName: \"kubernetes.io/projected/813c11be-9668-446f-9c2d-a1f84eb0ef50-kube-api-access-5x84m\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.973729 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-catalog-content\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 
23:53:07.973760 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-utilities\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.974268 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-utilities\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.974277 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-catalog-content\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:07 crc kubenswrapper[5045]: I1125 23:53:07.995531 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x84m\" (UniqueName: \"kubernetes.io/projected/813c11be-9668-446f-9c2d-a1f84eb0ef50-kube-api-access-5x84m\") pod \"community-operators-4pvrw\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:08 crc kubenswrapper[5045]: I1125 23:53:08.164472 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:08 crc kubenswrapper[5045]: I1125 23:53:08.702289 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4pvrw"] Nov 25 23:53:09 crc kubenswrapper[5045]: I1125 23:53:09.689472 5045 generic.go:334] "Generic (PLEG): container finished" podID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerID="c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21" exitCode=0 Nov 25 23:53:09 crc kubenswrapper[5045]: I1125 23:53:09.689559 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvrw" event={"ID":"813c11be-9668-446f-9c2d-a1f84eb0ef50","Type":"ContainerDied","Data":"c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21"} Nov 25 23:53:09 crc kubenswrapper[5045]: I1125 23:53:09.690154 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvrw" event={"ID":"813c11be-9668-446f-9c2d-a1f84eb0ef50","Type":"ContainerStarted","Data":"851fd97606d44c9a7885841bf1e63d3e36bcde96d5d2bbfae7e8b14849c997fc"} Nov 25 23:53:10 crc kubenswrapper[5045]: I1125 23:53:10.397178 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:53:10 crc kubenswrapper[5045]: E1125 23:53:10.397484 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:53:11 crc kubenswrapper[5045]: I1125 23:53:11.716635 5045 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/community-operators-4pvrw" event={"ID":"813c11be-9668-446f-9c2d-a1f84eb0ef50","Type":"ContainerStarted","Data":"4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d"} Nov 25 23:53:12 crc kubenswrapper[5045]: I1125 23:53:12.731479 5045 generic.go:334] "Generic (PLEG): container finished" podID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerID="4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d" exitCode=0 Nov 25 23:53:12 crc kubenswrapper[5045]: I1125 23:53:12.731574 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvrw" event={"ID":"813c11be-9668-446f-9c2d-a1f84eb0ef50","Type":"ContainerDied","Data":"4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d"} Nov 25 23:53:13 crc kubenswrapper[5045]: I1125 23:53:13.741338 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvrw" event={"ID":"813c11be-9668-446f-9c2d-a1f84eb0ef50","Type":"ContainerStarted","Data":"4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024"} Nov 25 23:53:13 crc kubenswrapper[5045]: I1125 23:53:13.763836 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4pvrw" podStartSLOduration=3.016897256 podStartE2EDuration="6.763817242s" podCreationTimestamp="2025-11-25 23:53:07 +0000 UTC" firstStartedPulling="2025-11-25 23:53:09.692413191 +0000 UTC m=+3246.050072343" lastFinishedPulling="2025-11-25 23:53:13.439333207 +0000 UTC m=+3249.796992329" observedRunningTime="2025-11-25 23:53:13.762167706 +0000 UTC m=+3250.119826828" watchObservedRunningTime="2025-11-25 23:53:13.763817242 +0000 UTC m=+3250.121476354" Nov 25 23:53:18 crc kubenswrapper[5045]: I1125 23:53:18.164861 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:18 crc kubenswrapper[5045]: I1125 23:53:18.165284 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:18 crc kubenswrapper[5045]: I1125 23:53:18.229263 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:18 crc kubenswrapper[5045]: I1125 23:53:18.883454 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:18 crc kubenswrapper[5045]: I1125 23:53:18.962375 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4pvrw"] Nov 25 23:53:20 crc kubenswrapper[5045]: I1125 23:53:20.821646 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4pvrw" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerName="registry-server" containerID="cri-o://4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024" gracePeriod=2 Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.320095 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.356328 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-utilities\") pod \"813c11be-9668-446f-9c2d-a1f84eb0ef50\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.356575 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-catalog-content\") pod \"813c11be-9668-446f-9c2d-a1f84eb0ef50\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.356619 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5x84m\" (UniqueName: \"kubernetes.io/projected/813c11be-9668-446f-9c2d-a1f84eb0ef50-kube-api-access-5x84m\") pod \"813c11be-9668-446f-9c2d-a1f84eb0ef50\" (UID: \"813c11be-9668-446f-9c2d-a1f84eb0ef50\") " Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.357360 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-utilities" (OuterVolumeSpecName: "utilities") pod "813c11be-9668-446f-9c2d-a1f84eb0ef50" (UID: "813c11be-9668-446f-9c2d-a1f84eb0ef50"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.369265 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/813c11be-9668-446f-9c2d-a1f84eb0ef50-kube-api-access-5x84m" (OuterVolumeSpecName: "kube-api-access-5x84m") pod "813c11be-9668-446f-9c2d-a1f84eb0ef50" (UID: "813c11be-9668-446f-9c2d-a1f84eb0ef50"). InnerVolumeSpecName "kube-api-access-5x84m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.427207 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "813c11be-9668-446f-9c2d-a1f84eb0ef50" (UID: "813c11be-9668-446f-9c2d-a1f84eb0ef50"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.459600 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.459817 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813c11be-9668-446f-9c2d-a1f84eb0ef50-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.459915 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5x84m\" (UniqueName: \"kubernetes.io/projected/813c11be-9668-446f-9c2d-a1f84eb0ef50-kube-api-access-5x84m\") on node \"crc\" DevicePath \"\"" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.830936 5045 generic.go:334] "Generic (PLEG): container finished" podID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerID="4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024" exitCode=0 Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.831011 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4pvrw" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.831056 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvrw" event={"ID":"813c11be-9668-446f-9c2d-a1f84eb0ef50","Type":"ContainerDied","Data":"4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024"} Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.831492 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvrw" event={"ID":"813c11be-9668-446f-9c2d-a1f84eb0ef50","Type":"ContainerDied","Data":"851fd97606d44c9a7885841bf1e63d3e36bcde96d5d2bbfae7e8b14849c997fc"} Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.831533 5045 scope.go:117] "RemoveContainer" containerID="4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.857668 5045 scope.go:117] "RemoveContainer" containerID="4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.884766 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4pvrw"] Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.894024 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4pvrw"] Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.902456 5045 scope.go:117] "RemoveContainer" containerID="c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.938340 5045 scope.go:117] "RemoveContainer" containerID="4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024" Nov 25 23:53:21 crc kubenswrapper[5045]: E1125 23:53:21.940742 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024\": container with ID starting with 4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024 not found: ID does not exist" containerID="4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.941178 
5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024"} err="failed to get container status \"4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024\": rpc error: code = NotFound desc = could not find container \"4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024\": container with ID starting with 4d398339d9c4c2c83298cd6b2688d543faf1dbba8ddcc11457ae2ecfb30ed024 not found: ID does not exist" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.941362 5045 scope.go:117] "RemoveContainer" containerID="4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d" Nov 25 23:53:21 crc kubenswrapper[5045]: E1125 23:53:21.942050 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d\": container with ID starting with 4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d not found: ID does not exist" containerID="4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.942122 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d"} err="failed to get container status \"4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d\": rpc error: code = NotFound desc = could not find container \"4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d\": container with ID starting with 4d5c323e99bbaf9a3daf73c4ff2fd228a8e8adbbb4c94552d6647925fa823f6d not found: ID does not exist" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.942160 5045 scope.go:117] "RemoveContainer" containerID="c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21" Nov 25 23:53:21 crc kubenswrapper[5045]: E1125 23:53:21.943024 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21\": container with ID starting with c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21 not found: ID does not exist" containerID="c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21" Nov 25 23:53:21 crc kubenswrapper[5045]: I1125 23:53:21.943086 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21"} err="failed to get container status \"c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21\": rpc error: code = NotFound desc = could not find container \"c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21\": container with ID starting with c50ff422eb92ba76b4cb03e9f22b594cae49194df45fdd63e55a3d9f4a000f21 not found: ID does not exist" Nov 25 23:53:22 crc kubenswrapper[5045]: I1125 23:53:22.414521 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" path="/var/lib/kubelet/pods/813c11be-9668-446f-9c2d-a1f84eb0ef50/volumes" Nov 25 23:53:24 crc kubenswrapper[5045]: I1125 23:53:24.419166 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:53:24 crc kubenswrapper[5045]: E1125 23:53:24.419842 5045 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 25 23:53:36 crc kubenswrapper[5045]: I1125 23:53:36.396822 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:53:37 crc kubenswrapper[5045]: I1125 23:53:37.035654 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"8a05b3b735e54a7242e242635842fa11266f50eddb0a08bb26e8707c81df9b5b"} Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.233548 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 23:53:41 crc kubenswrapper[5045]: E1125 23:53:41.239186 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerName="extract-utilities" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.239257 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerName="extract-utilities" Nov 25 23:53:41 crc kubenswrapper[5045]: E1125 23:53:41.239274 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerName="registry-server" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.239284 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerName="registry-server" Nov 25 23:53:41 crc kubenswrapper[5045]: E1125 23:53:41.239310 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerName="extract-content" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.239323 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerName="extract-content" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.239644 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="813c11be-9668-446f-9c2d-a1f84eb0ef50" containerName="registry-server" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.240639 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.243882 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.245635 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.245766 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-c2w4r" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.246856 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.259839 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.323830 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-config-data\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.323961 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.324155 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h54ms\" (UniqueName: \"kubernetes.io/projected/44c7f66f-9267-4165-8d51-e8fa7b33b354-kube-api-access-h54ms\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.324315 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.324381 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.324581 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.324664 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.324794 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.324987 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427455 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-config-data\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427578 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427654 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h54ms\" (UniqueName: \"kubernetes.io/projected/44c7f66f-9267-4165-8d51-e8fa7b33b354-kube-api-access-h54ms\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427735 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427771 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427850 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427887 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: 
\"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427917 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.427990 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.428825 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.428826 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.428911 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.429123 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.430021 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-config-data\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.438806 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.440498 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 
23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.452882 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.455809 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h54ms\" (UniqueName: \"kubernetes.io/projected/44c7f66f-9267-4165-8d51-e8fa7b33b354-kube-api-access-h54ms\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.503548 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " pod="openstack/tempest-tests-tempest" Nov 25 23:53:41 crc kubenswrapper[5045]: I1125 23:53:41.584535 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 23:53:42 crc kubenswrapper[5045]: I1125 23:53:42.094402 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 23:53:43 crc kubenswrapper[5045]: I1125 23:53:43.121402 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"44c7f66f-9267-4165-8d51-e8fa7b33b354","Type":"ContainerStarted","Data":"d7e9cb3f68aee9fd2d1e17f9aeba70f5150d0d6f1ce0146334aa2b875665e908"} Nov 25 23:54:10 crc kubenswrapper[5045]: E1125 23:54:10.718260 5045 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 25 23:54:10 crc kubenswrapper[5045]: E1125 23:54:10.719335 5045 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h54ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(44c7f66f-9267-4165-8d51-e8fa7b33b354): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 23:54:10 crc kubenswrapper[5045]: E1125 23:54:10.721597 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="44c7f66f-9267-4165-8d51-e8fa7b33b354" Nov 25 23:54:11 crc kubenswrapper[5045]: E1125 23:54:11.405561 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="44c7f66f-9267-4165-8d51-e8fa7b33b354" Nov 25 23:54:25 crc kubenswrapper[5045]: I1125 23:54:25.292216 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 23:54:27 crc kubenswrapper[5045]: I1125 23:54:27.578176 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"44c7f66f-9267-4165-8d51-e8fa7b33b354","Type":"ContainerStarted","Data":"33ee1ee4bc0a7a45d56a732bb257d72c8b40f74178ccc53bcff0e210243e2fc7"} Nov 25 23:54:27 crc kubenswrapper[5045]: I1125 23:54:27.609919 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.416271886 podStartE2EDuration="47.609890687s" podCreationTimestamp="2025-11-25 23:53:40 +0000 UTC" firstStartedPulling="2025-11-25 23:53:42.093797185 +0000 UTC m=+3278.451456297" lastFinishedPulling="2025-11-25 23:54:25.287415956 +0000 UTC m=+3321.645075098" observedRunningTime="2025-11-25 23:54:27.595170482 +0000 UTC m=+3323.952829614" watchObservedRunningTime="2025-11-25 23:54:27.609890687 +0000 UTC m=+3323.967549809" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.273011 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pbmvg"] Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.275250 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.316678 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbmvg"] Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.345322 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7s8q\" (UniqueName: \"kubernetes.io/projected/5e84f026-ec90-42dc-ba27-b65eae914c3b-kube-api-access-n7s8q\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.345448 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-utilities\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.345534 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-catalog-content\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.446695 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7s8q\" (UniqueName: \"kubernetes.io/projected/5e84f026-ec90-42dc-ba27-b65eae914c3b-kube-api-access-n7s8q\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.446876 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-utilities\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.447946 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-catalog-content\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.448180 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-utilities\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.448634 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-catalog-content\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.472758 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-n7s8q\" (UniqueName: \"kubernetes.io/projected/5e84f026-ec90-42dc-ba27-b65eae914c3b-kube-api-access-n7s8q\") pod \"redhat-marketplace-pbmvg\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:58 crc kubenswrapper[5045]: I1125 23:54:58.594321 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:54:59 crc kubenswrapper[5045]: I1125 23:54:59.123144 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbmvg"] Nov 25 23:54:59 crc kubenswrapper[5045]: I1125 23:54:59.932238 5045 generic.go:334] "Generic (PLEG): container finished" podID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerID="686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15" exitCode=0 Nov 25 23:54:59 crc kubenswrapper[5045]: I1125 23:54:59.934792 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbmvg" event={"ID":"5e84f026-ec90-42dc-ba27-b65eae914c3b","Type":"ContainerDied","Data":"686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15"} Nov 25 23:54:59 crc kubenswrapper[5045]: I1125 23:54:59.934863 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbmvg" event={"ID":"5e84f026-ec90-42dc-ba27-b65eae914c3b","Type":"ContainerStarted","Data":"bf50a845b8062467230381da237b04200fe5e34a1a62e3e7dfaf003d09d5f233"} Nov 25 23:55:01 crc kubenswrapper[5045]: I1125 23:55:01.958060 5045 generic.go:334] "Generic (PLEG): container finished" podID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerID="fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957" exitCode=0 Nov 25 23:55:01 crc kubenswrapper[5045]: I1125 23:55:01.958239 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbmvg" event={"ID":"5e84f026-ec90-42dc-ba27-b65eae914c3b","Type":"ContainerDied","Data":"fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957"} Nov 25 23:55:04 crc kubenswrapper[5045]: I1125 23:55:04.994850 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbmvg" event={"ID":"5e84f026-ec90-42dc-ba27-b65eae914c3b","Type":"ContainerStarted","Data":"290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385"} Nov 25 23:55:05 crc kubenswrapper[5045]: I1125 23:55:05.027551 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pbmvg" podStartSLOduration=3.031769015 podStartE2EDuration="7.027521339s" podCreationTimestamp="2025-11-25 23:54:58 +0000 UTC" firstStartedPulling="2025-11-25 23:54:59.934299632 +0000 UTC m=+3356.291958744" lastFinishedPulling="2025-11-25 23:55:03.930051956 +0000 UTC m=+3360.287711068" observedRunningTime="2025-11-25 23:55:05.014054659 +0000 UTC m=+3361.371713771" watchObservedRunningTime="2025-11-25 23:55:05.027521339 +0000 UTC m=+3361.385180481" Nov 25 23:55:08 crc kubenswrapper[5045]: I1125 23:55:08.594774 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:55:08 crc kubenswrapper[5045]: I1125 23:55:08.595359 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:55:09 crc kubenswrapper[5045]: I1125 23:55:09.682994 5045 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/redhat-marketplace-pbmvg" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="registry-server" probeResult="failure" output=< Nov 25 23:55:09 crc kubenswrapper[5045]: timeout: failed to connect service ":50051" within 1s Nov 25 23:55:09 crc kubenswrapper[5045]: > Nov 25 23:55:18 crc kubenswrapper[5045]: I1125 23:55:18.649620 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:55:18 crc kubenswrapper[5045]: I1125 23:55:18.705129 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:55:18 crc kubenswrapper[5045]: I1125 23:55:18.902849 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbmvg"] Nov 25 23:55:20 crc kubenswrapper[5045]: I1125 23:55:20.146816 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pbmvg" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="registry-server" containerID="cri-o://290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385" gracePeriod=2 Nov 25 23:55:20 crc kubenswrapper[5045]: I1125 23:55:20.923849 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.026276 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7s8q\" (UniqueName: \"kubernetes.io/projected/5e84f026-ec90-42dc-ba27-b65eae914c3b-kube-api-access-n7s8q\") pod \"5e84f026-ec90-42dc-ba27-b65eae914c3b\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.026351 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-utilities\") pod \"5e84f026-ec90-42dc-ba27-b65eae914c3b\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.026502 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-catalog-content\") pod \"5e84f026-ec90-42dc-ba27-b65eae914c3b\" (UID: \"5e84f026-ec90-42dc-ba27-b65eae914c3b\") " Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.027938 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-utilities" (OuterVolumeSpecName: "utilities") pod "5e84f026-ec90-42dc-ba27-b65eae914c3b" (UID: "5e84f026-ec90-42dc-ba27-b65eae914c3b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.033388 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e84f026-ec90-42dc-ba27-b65eae914c3b-kube-api-access-n7s8q" (OuterVolumeSpecName: "kube-api-access-n7s8q") pod "5e84f026-ec90-42dc-ba27-b65eae914c3b" (UID: "5e84f026-ec90-42dc-ba27-b65eae914c3b"). InnerVolumeSpecName "kube-api-access-n7s8q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.045739 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5e84f026-ec90-42dc-ba27-b65eae914c3b" (UID: "5e84f026-ec90-42dc-ba27-b65eae914c3b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.128834 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7s8q\" (UniqueName: \"kubernetes.io/projected/5e84f026-ec90-42dc-ba27-b65eae914c3b-kube-api-access-n7s8q\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.128866 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.128875 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e84f026-ec90-42dc-ba27-b65eae914c3b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.159142 5045 generic.go:334] "Generic (PLEG): container finished" podID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerID="290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385" exitCode=0 Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.159190 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbmvg" event={"ID":"5e84f026-ec90-42dc-ba27-b65eae914c3b","Type":"ContainerDied","Data":"290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385"} Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.159216 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbmvg" event={"ID":"5e84f026-ec90-42dc-ba27-b65eae914c3b","Type":"ContainerDied","Data":"bf50a845b8062467230381da237b04200fe5e34a1a62e3e7dfaf003d09d5f233"} Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.159233 5045 scope.go:117] "RemoveContainer" containerID="290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.159309 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbmvg" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.183432 5045 scope.go:117] "RemoveContainer" containerID="fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.219109 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbmvg"] Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.227421 5045 scope.go:117] "RemoveContainer" containerID="686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.230860 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbmvg"] Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.269164 5045 scope.go:117] "RemoveContainer" containerID="290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385" Nov 25 23:55:21 crc kubenswrapper[5045]: E1125 23:55:21.269651 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385\": container with ID starting with 290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385 not found: ID does not exist" containerID="290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.269694 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385"} err="failed to get container status \"290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385\": rpc error: code = NotFound desc = could not find container \"290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385\": container with ID starting with 290a7997e89ec1bf645c6f8bceb6045a9a937493dd30222a4b6914c9f49bd385 not found: ID does not exist" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.269737 5045 scope.go:117] "RemoveContainer" containerID="fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957" Nov 25 23:55:21 crc kubenswrapper[5045]: E1125 23:55:21.270038 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957\": container with ID starting with fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957 not found: ID does not exist" containerID="fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.270069 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957"} err="failed to get container status \"fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957\": rpc error: code = NotFound desc = could not find container \"fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957\": container with ID starting with fa253393224e4446c540d3c6e84b4e7eff50ff0409d7f97566a8a443f3481957 not found: ID does not exist" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.270090 5045 scope.go:117] "RemoveContainer" containerID="686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15" Nov 25 23:55:21 crc kubenswrapper[5045]: E1125 23:55:21.270316 5045 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15\": container with ID starting with 686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15 not found: ID does not exist" containerID="686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15" Nov 25 23:55:21 crc kubenswrapper[5045]: I1125 23:55:21.270347 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15"} err="failed to get container status \"686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15\": rpc error: code = NotFound desc = could not find container \"686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15\": container with ID starting with 686aa068e7aca029bbccb9cfeda239648c6e9b29ba90157bbabd95fee9957b15 not found: ID does not exist" Nov 25 23:55:22 crc kubenswrapper[5045]: I1125 23:55:22.410120 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" path="/var/lib/kubelet/pods/5e84f026-ec90-42dc-ba27-b65eae914c3b/volumes" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.304087 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p2msh"] Nov 25 23:55:24 crc kubenswrapper[5045]: E1125 23:55:24.304760 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="registry-server" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.304771 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="registry-server" Nov 25 23:55:24 crc kubenswrapper[5045]: E1125 23:55:24.304794 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="extract-content" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.304800 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="extract-content" Nov 25 23:55:24 crc kubenswrapper[5045]: E1125 23:55:24.304825 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="extract-utilities" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.304831 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="extract-utilities" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.305001 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e84f026-ec90-42dc-ba27-b65eae914c3b" containerName="registry-server" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.306265 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.325700 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p2msh"] Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.396206 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-utilities\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.396350 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-catalog-content\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.396420 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sz9h\" (UniqueName: \"kubernetes.io/projected/999e83f8-81ba-46b4-ab74-b6aabe118d5b-kube-api-access-4sz9h\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.499435 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-catalog-content\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.499593 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sz9h\" (UniqueName: \"kubernetes.io/projected/999e83f8-81ba-46b4-ab74-b6aabe118d5b-kube-api-access-4sz9h\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.499665 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-utilities\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.500622 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-catalog-content\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.500903 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-utilities\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.521346 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4sz9h\" (UniqueName: \"kubernetes.io/projected/999e83f8-81ba-46b4-ab74-b6aabe118d5b-kube-api-access-4sz9h\") pod \"redhat-operators-p2msh\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:24 crc kubenswrapper[5045]: I1125 23:55:24.636604 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:25 crc kubenswrapper[5045]: I1125 23:55:25.111390 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p2msh"] Nov 25 23:55:25 crc kubenswrapper[5045]: W1125 23:55:25.115985 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod999e83f8_81ba_46b4_ab74_b6aabe118d5b.slice/crio-5e268b66df2db66d284297670b4578150325067b08651902d60e7b77f75aeefc WatchSource:0}: Error finding container 5e268b66df2db66d284297670b4578150325067b08651902d60e7b77f75aeefc: Status 404 returned error can't find the container with id 5e268b66df2db66d284297670b4578150325067b08651902d60e7b77f75aeefc Nov 25 23:55:25 crc kubenswrapper[5045]: I1125 23:55:25.194611 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2msh" event={"ID":"999e83f8-81ba-46b4-ab74-b6aabe118d5b","Type":"ContainerStarted","Data":"5e268b66df2db66d284297670b4578150325067b08651902d60e7b77f75aeefc"} Nov 25 23:55:26 crc kubenswrapper[5045]: I1125 23:55:26.205462 5045 generic.go:334] "Generic (PLEG): container finished" podID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerID="fa8fbfbe067e92b89d8d83c218570ace531d789921bfc289ec0272c7740ab523" exitCode=0 Nov 25 23:55:26 crc kubenswrapper[5045]: I1125 23:55:26.205691 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2msh" event={"ID":"999e83f8-81ba-46b4-ab74-b6aabe118d5b","Type":"ContainerDied","Data":"fa8fbfbe067e92b89d8d83c218570ace531d789921bfc289ec0272c7740ab523"} Nov 25 23:55:27 crc kubenswrapper[5045]: I1125 23:55:27.217764 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2msh" event={"ID":"999e83f8-81ba-46b4-ab74-b6aabe118d5b","Type":"ContainerStarted","Data":"a3cf1474fd1ac4abc12f0b1f1b5eaf911718e4b8c8795931edafca6009f168a9"} Nov 25 23:55:30 crc kubenswrapper[5045]: I1125 23:55:30.257157 5045 generic.go:334] "Generic (PLEG): container finished" podID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerID="a3cf1474fd1ac4abc12f0b1f1b5eaf911718e4b8c8795931edafca6009f168a9" exitCode=0 Nov 25 23:55:30 crc kubenswrapper[5045]: I1125 23:55:30.258097 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2msh" event={"ID":"999e83f8-81ba-46b4-ab74-b6aabe118d5b","Type":"ContainerDied","Data":"a3cf1474fd1ac4abc12f0b1f1b5eaf911718e4b8c8795931edafca6009f168a9"} Nov 25 23:55:31 crc kubenswrapper[5045]: I1125 23:55:31.268843 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2msh" event={"ID":"999e83f8-81ba-46b4-ab74-b6aabe118d5b","Type":"ContainerStarted","Data":"c0bec5f60f62d0f54d4987d1240b466d3998abd8a5954650d049937b1fc92f30"} Nov 25 23:55:31 crc kubenswrapper[5045]: I1125 23:55:31.290903 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p2msh" podStartSLOduration=2.471346315 podStartE2EDuration="7.290885431s" 
podCreationTimestamp="2025-11-25 23:55:24 +0000 UTC" firstStartedPulling="2025-11-25 23:55:26.209607593 +0000 UTC m=+3382.567266745" lastFinishedPulling="2025-11-25 23:55:31.029146749 +0000 UTC m=+3387.386805861" observedRunningTime="2025-11-25 23:55:31.289166623 +0000 UTC m=+3387.646825735" watchObservedRunningTime="2025-11-25 23:55:31.290885431 +0000 UTC m=+3387.648544543" Nov 25 23:55:34 crc kubenswrapper[5045]: I1125 23:55:34.636675 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:34 crc kubenswrapper[5045]: I1125 23:55:34.637922 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:35 crc kubenswrapper[5045]: I1125 23:55:35.698043 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p2msh" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="registry-server" probeResult="failure" output=< Nov 25 23:55:35 crc kubenswrapper[5045]: timeout: failed to connect service ":50051" within 1s Nov 25 23:55:35 crc kubenswrapper[5045]: > Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.639343 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q4ln8"] Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.642259 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.656526 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q4ln8"] Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.685559 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-utilities\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.685666 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cnq2\" (UniqueName: \"kubernetes.io/projected/b735baca-6822-4c99-8feb-c9823dba65e9-kube-api-access-2cnq2\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.685854 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-catalog-content\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.787723 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-utilities\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.788004 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cnq2\" (UniqueName: 
\"kubernetes.io/projected/b735baca-6822-4c99-8feb-c9823dba65e9-kube-api-access-2cnq2\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.788062 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-catalog-content\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.788191 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-utilities\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.788486 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-catalog-content\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.813669 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cnq2\" (UniqueName: \"kubernetes.io/projected/b735baca-6822-4c99-8feb-c9823dba65e9-kube-api-access-2cnq2\") pod \"certified-operators-q4ln8\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:41 crc kubenswrapper[5045]: I1125 23:55:41.971586 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:42 crc kubenswrapper[5045]: I1125 23:55:42.518492 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q4ln8"] Nov 25 23:55:43 crc kubenswrapper[5045]: I1125 23:55:43.394579 5045 generic.go:334] "Generic (PLEG): container finished" podID="b735baca-6822-4c99-8feb-c9823dba65e9" containerID="3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408" exitCode=0 Nov 25 23:55:43 crc kubenswrapper[5045]: I1125 23:55:43.394620 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4ln8" event={"ID":"b735baca-6822-4c99-8feb-c9823dba65e9","Type":"ContainerDied","Data":"3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408"} Nov 25 23:55:43 crc kubenswrapper[5045]: I1125 23:55:43.395106 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4ln8" event={"ID":"b735baca-6822-4c99-8feb-c9823dba65e9","Type":"ContainerStarted","Data":"163bf36b147f59d76ec4535349b692c6bcb485f63b26001e83e439acd3f37de3"} Nov 25 23:55:44 crc kubenswrapper[5045]: I1125 23:55:44.711871 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:44 crc kubenswrapper[5045]: I1125 23:55:44.788274 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:45 crc kubenswrapper[5045]: E1125 23:55:45.057720 5045 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb735baca_6822_4c99_8feb_c9823dba65e9.slice/crio-conmon-5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb735baca_6822_4c99_8feb_c9823dba65e9.slice/crio-5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce.scope\": RecentStats: unable to find data in memory cache]" Nov 25 23:55:45 crc kubenswrapper[5045]: I1125 23:55:45.421206 5045 generic.go:334] "Generic (PLEG): container finished" podID="b735baca-6822-4c99-8feb-c9823dba65e9" containerID="5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce" exitCode=0 Nov 25 23:55:45 crc kubenswrapper[5045]: I1125 23:55:45.421251 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4ln8" event={"ID":"b735baca-6822-4c99-8feb-c9823dba65e9","Type":"ContainerDied","Data":"5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce"} Nov 25 23:55:46 crc kubenswrapper[5045]: I1125 23:55:46.412961 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p2msh"] Nov 25 23:55:46 crc kubenswrapper[5045]: I1125 23:55:46.434448 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p2msh" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="registry-server" containerID="cri-o://c0bec5f60f62d0f54d4987d1240b466d3998abd8a5954650d049937b1fc92f30" gracePeriod=2 Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.450579 5045 generic.go:334] "Generic (PLEG): container finished" podID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerID="c0bec5f60f62d0f54d4987d1240b466d3998abd8a5954650d049937b1fc92f30" 
exitCode=0 Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.450698 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2msh" event={"ID":"999e83f8-81ba-46b4-ab74-b6aabe118d5b","Type":"ContainerDied","Data":"c0bec5f60f62d0f54d4987d1240b466d3998abd8a5954650d049937b1fc92f30"} Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.862408 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.939121 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4sz9h\" (UniqueName: \"kubernetes.io/projected/999e83f8-81ba-46b4-ab74-b6aabe118d5b-kube-api-access-4sz9h\") pod \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.939512 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-utilities\") pod \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.939574 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-catalog-content\") pod \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\" (UID: \"999e83f8-81ba-46b4-ab74-b6aabe118d5b\") " Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.940354 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-utilities" (OuterVolumeSpecName: "utilities") pod "999e83f8-81ba-46b4-ab74-b6aabe118d5b" (UID: "999e83f8-81ba-46b4-ab74-b6aabe118d5b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.940585 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:47 crc kubenswrapper[5045]: I1125 23:55:47.945545 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/999e83f8-81ba-46b4-ab74-b6aabe118d5b-kube-api-access-4sz9h" (OuterVolumeSpecName: "kube-api-access-4sz9h") pod "999e83f8-81ba-46b4-ab74-b6aabe118d5b" (UID: "999e83f8-81ba-46b4-ab74-b6aabe118d5b"). InnerVolumeSpecName "kube-api-access-4sz9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.018860 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "999e83f8-81ba-46b4-ab74-b6aabe118d5b" (UID: "999e83f8-81ba-46b4-ab74-b6aabe118d5b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.045609 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4sz9h\" (UniqueName: \"kubernetes.io/projected/999e83f8-81ba-46b4-ab74-b6aabe118d5b-kube-api-access-4sz9h\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.045657 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/999e83f8-81ba-46b4-ab74-b6aabe118d5b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.463506 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2msh" event={"ID":"999e83f8-81ba-46b4-ab74-b6aabe118d5b","Type":"ContainerDied","Data":"5e268b66df2db66d284297670b4578150325067b08651902d60e7b77f75aeefc"} Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.463566 5045 scope.go:117] "RemoveContainer" containerID="c0bec5f60f62d0f54d4987d1240b466d3998abd8a5954650d049937b1fc92f30" Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.463590 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p2msh" Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.468162 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4ln8" event={"ID":"b735baca-6822-4c99-8feb-c9823dba65e9","Type":"ContainerStarted","Data":"423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9"} Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.489904 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q4ln8" podStartSLOduration=3.6629818800000002 podStartE2EDuration="7.489879431s" podCreationTimestamp="2025-11-25 23:55:41 +0000 UTC" firstStartedPulling="2025-11-25 23:55:43.397481209 +0000 UTC m=+3399.755140321" lastFinishedPulling="2025-11-25 23:55:47.22437876 +0000 UTC m=+3403.582037872" observedRunningTime="2025-11-25 23:55:48.488668327 +0000 UTC m=+3404.846327439" watchObservedRunningTime="2025-11-25 23:55:48.489879431 +0000 UTC m=+3404.847538543" Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.506275 5045 scope.go:117] "RemoveContainer" containerID="a3cf1474fd1ac4abc12f0b1f1b5eaf911718e4b8c8795931edafca6009f168a9" Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.515209 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p2msh"] Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.523542 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p2msh"] Nov 25 23:55:48 crc kubenswrapper[5045]: I1125 23:55:48.534742 5045 scope.go:117] "RemoveContainer" containerID="fa8fbfbe067e92b89d8d83c218570ace531d789921bfc289ec0272c7740ab523" Nov 25 23:55:50 crc kubenswrapper[5045]: I1125 23:55:50.410951 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" path="/var/lib/kubelet/pods/999e83f8-81ba-46b4-ab74-b6aabe118d5b/volumes" Nov 25 23:55:51 crc kubenswrapper[5045]: I1125 23:55:51.972944 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:51 crc kubenswrapper[5045]: I1125 23:55:51.973018 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:52 crc kubenswrapper[5045]: I1125 23:55:52.042169 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:52 crc kubenswrapper[5045]: I1125 23:55:52.578666 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:53 crc kubenswrapper[5045]: I1125 23:55:53.202507 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q4ln8"] Nov 25 23:55:54 crc kubenswrapper[5045]: I1125 23:55:54.526540 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q4ln8" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" containerName="registry-server" containerID="cri-o://423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9" gracePeriod=2 Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.023528 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.094661 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-catalog-content\") pod \"b735baca-6822-4c99-8feb-c9823dba65e9\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.095143 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cnq2\" (UniqueName: \"kubernetes.io/projected/b735baca-6822-4c99-8feb-c9823dba65e9-kube-api-access-2cnq2\") pod \"b735baca-6822-4c99-8feb-c9823dba65e9\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.095238 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-utilities\") pod \"b735baca-6822-4c99-8feb-c9823dba65e9\" (UID: \"b735baca-6822-4c99-8feb-c9823dba65e9\") " Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.096021 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-utilities" (OuterVolumeSpecName: "utilities") pod "b735baca-6822-4c99-8feb-c9823dba65e9" (UID: "b735baca-6822-4c99-8feb-c9823dba65e9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.113780 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b735baca-6822-4c99-8feb-c9823dba65e9-kube-api-access-2cnq2" (OuterVolumeSpecName: "kube-api-access-2cnq2") pod "b735baca-6822-4c99-8feb-c9823dba65e9" (UID: "b735baca-6822-4c99-8feb-c9823dba65e9"). InnerVolumeSpecName "kube-api-access-2cnq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.144972 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b735baca-6822-4c99-8feb-c9823dba65e9" (UID: "b735baca-6822-4c99-8feb-c9823dba65e9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.198899 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.198975 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b735baca-6822-4c99-8feb-c9823dba65e9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.199014 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cnq2\" (UniqueName: \"kubernetes.io/projected/b735baca-6822-4c99-8feb-c9823dba65e9-kube-api-access-2cnq2\") on node \"crc\" DevicePath \"\"" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.538991 5045 generic.go:334] "Generic (PLEG): container finished" podID="b735baca-6822-4c99-8feb-c9823dba65e9" containerID="423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9" exitCode=0 Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.539056 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4ln8" event={"ID":"b735baca-6822-4c99-8feb-c9823dba65e9","Type":"ContainerDied","Data":"423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9"} Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.539108 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4ln8" event={"ID":"b735baca-6822-4c99-8feb-c9823dba65e9","Type":"ContainerDied","Data":"163bf36b147f59d76ec4535349b692c6bcb485f63b26001e83e439acd3f37de3"} Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.539136 5045 scope.go:117] "RemoveContainer" containerID="423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.540377 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q4ln8" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.573858 5045 scope.go:117] "RemoveContainer" containerID="5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.592961 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q4ln8"] Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.602375 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q4ln8"] Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.623120 5045 scope.go:117] "RemoveContainer" containerID="3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.646424 5045 scope.go:117] "RemoveContainer" containerID="423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9" Nov 25 23:55:55 crc kubenswrapper[5045]: E1125 23:55:55.646999 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9\": container with ID starting with 423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9 not found: ID does not exist" containerID="423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.647052 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9"} err="failed to get container status \"423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9\": rpc error: code = NotFound desc = could not find container \"423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9\": container with ID starting with 423d7edc9e6a4d99b88ce6e2c5de180a06e432713ac39d444de913b9924108d9 not found: ID does not exist" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.647088 5045 scope.go:117] "RemoveContainer" containerID="5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce" Nov 25 23:55:55 crc kubenswrapper[5045]: E1125 23:55:55.647512 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce\": container with ID starting with 5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce not found: ID does not exist" containerID="5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.647547 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce"} err="failed to get container status \"5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce\": rpc error: code = NotFound desc = could not find container \"5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce\": container with ID starting with 5123ca0295faeae9447525a3f392ac3c87742a1f093b5bd1c2d65f4bfddc5dce not found: ID does not exist" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.647587 5045 scope.go:117] "RemoveContainer" containerID="3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408" Nov 25 23:55:55 crc kubenswrapper[5045]: E1125 23:55:55.647970 5045 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408\": container with ID starting with 3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408 not found: ID does not exist" containerID="3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408" Nov 25 23:55:55 crc kubenswrapper[5045]: I1125 23:55:55.648027 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408"} err="failed to get container status \"3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408\": rpc error: code = NotFound desc = could not find container \"3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408\": container with ID starting with 3e25af9c48ce566b3d391c9b568fa8a41d6338c4d6334f876f4357d6a2a9e408 not found: ID does not exist" Nov 25 23:55:56 crc kubenswrapper[5045]: I1125 23:55:56.406948 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" path="/var/lib/kubelet/pods/b735baca-6822-4c99-8feb-c9823dba65e9/volumes" Nov 25 23:56:00 crc kubenswrapper[5045]: I1125 23:56:00.540622 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:56:00 crc kubenswrapper[5045]: I1125 23:56:00.541226 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:56:30 crc kubenswrapper[5045]: I1125 23:56:30.540605 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:56:30 crc kubenswrapper[5045]: I1125 23:56:30.541154 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:57:00 crc kubenswrapper[5045]: I1125 23:57:00.540860 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:57:00 crc kubenswrapper[5045]: I1125 23:57:00.541610 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:57:00 crc kubenswrapper[5045]: I1125 23:57:00.541689 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 25 23:57:00 crc kubenswrapper[5045]: I1125 23:57:00.542899 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8a05b3b735e54a7242e242635842fa11266f50eddb0a08bb26e8707c81df9b5b"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 23:57:00 crc kubenswrapper[5045]: I1125 23:57:00.543003 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://8a05b3b735e54a7242e242635842fa11266f50eddb0a08bb26e8707c81df9b5b" gracePeriod=600 Nov 25 23:57:01 crc kubenswrapper[5045]: I1125 23:57:01.251907 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="8a05b3b735e54a7242e242635842fa11266f50eddb0a08bb26e8707c81df9b5b" exitCode=0 Nov 25 23:57:01 crc kubenswrapper[5045]: I1125 23:57:01.252108 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"8a05b3b735e54a7242e242635842fa11266f50eddb0a08bb26e8707c81df9b5b"} Nov 25 23:57:01 crc kubenswrapper[5045]: I1125 23:57:01.252321 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b"} Nov 25 23:57:01 crc kubenswrapper[5045]: I1125 23:57:01.252347 5045 scope.go:117] "RemoveContainer" containerID="22e0ad8621bff7175bb8c34e78cca8556af82585191143b6f645d08f59e5336a" Nov 25 23:57:19 crc kubenswrapper[5045]: I1125 23:57:19.483246 5045 generic.go:334] "Generic (PLEG): container finished" podID="44c7f66f-9267-4165-8d51-e8fa7b33b354" containerID="33ee1ee4bc0a7a45d56a732bb257d72c8b40f74178ccc53bcff0e210243e2fc7" exitCode=0 Nov 25 23:57:19 crc kubenswrapper[5045]: I1125 23:57:19.483359 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"44c7f66f-9267-4165-8d51-e8fa7b33b354","Type":"ContainerDied","Data":"33ee1ee4bc0a7a45d56a732bb257d72c8b40f74178ccc53bcff0e210243e2fc7"} Nov 25 23:57:20 crc kubenswrapper[5045]: I1125 23:57:20.964657 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103170 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h54ms\" (UniqueName: \"kubernetes.io/projected/44c7f66f-9267-4165-8d51-e8fa7b33b354-kube-api-access-h54ms\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103348 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103374 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-temporary\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103401 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-config-data\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103436 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ssh-key\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103455 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-workdir\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103493 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ca-certs\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103507 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config-secret\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.103540 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"44c7f66f-9267-4165-8d51-e8fa7b33b354\" (UID: \"44c7f66f-9267-4165-8d51-e8fa7b33b354\") " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.104046 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-temporary" (OuterVolumeSpecName: 
"test-operator-ephemeral-temporary") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.105195 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-config-data" (OuterVolumeSpecName: "config-data") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.107392 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.111999 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "test-operator-logs") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.112023 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44c7f66f-9267-4165-8d51-e8fa7b33b354-kube-api-access-h54ms" (OuterVolumeSpecName: "kube-api-access-h54ms") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "kube-api-access-h54ms". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.131816 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.135013 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.146431 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.178161 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "44c7f66f-9267-4165-8d51-e8fa7b33b354" (UID: "44c7f66f-9267-4165-8d51-e8fa7b33b354"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206243 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h54ms\" (UniqueName: \"kubernetes.io/projected/44c7f66f-9267-4165-8d51-e8fa7b33b354-kube-api-access-h54ms\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206271 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206281 5045 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206295 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44c7f66f-9267-4165-8d51-e8fa7b33b354-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206304 5045 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206311 5045 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44c7f66f-9267-4165-8d51-e8fa7b33b354-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206320 5045 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206388 5045 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44c7f66f-9267-4165-8d51-e8fa7b33b354-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.206426 5045 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.231572 5045 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.308815 5045 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.517459 5045 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.517588 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"44c7f66f-9267-4165-8d51-e8fa7b33b354","Type":"ContainerDied","Data":"d7e9cb3f68aee9fd2d1e17f9aeba70f5150d0d6f1ce0146334aa2b875665e908"} Nov 25 23:57:21 crc kubenswrapper[5045]: I1125 23:57:21.517611 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d7e9cb3f68aee9fd2d1e17f9aeba70f5150d0d6f1ce0146334aa2b875665e908" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.257762 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 23:57:29 crc kubenswrapper[5045]: E1125 23:57:29.259006 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" containerName="extract-content" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259030 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" containerName="extract-content" Nov 25 23:57:29 crc kubenswrapper[5045]: E1125 23:57:29.259084 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="extract-utilities" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259101 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="extract-utilities" Nov 25 23:57:29 crc kubenswrapper[5045]: E1125 23:57:29.259128 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="extract-content" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259142 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="extract-content" Nov 25 23:57:29 crc kubenswrapper[5045]: E1125 23:57:29.259164 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" containerName="registry-server" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259179 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" containerName="registry-server" Nov 25 23:57:29 crc kubenswrapper[5045]: E1125 23:57:29.259209 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="registry-server" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259222 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="registry-server" Nov 25 23:57:29 crc kubenswrapper[5045]: E1125 23:57:29.259248 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" containerName="extract-utilities" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259260 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" containerName="extract-utilities" Nov 25 23:57:29 crc kubenswrapper[5045]: E1125 23:57:29.259293 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44c7f66f-9267-4165-8d51-e8fa7b33b354" containerName="tempest-tests-tempest-tests-runner" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259306 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="44c7f66f-9267-4165-8d51-e8fa7b33b354" 
containerName="tempest-tests-tempest-tests-runner" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259647 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="999e83f8-81ba-46b4-ab74-b6aabe118d5b" containerName="registry-server" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259671 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="b735baca-6822-4c99-8feb-c9823dba65e9" containerName="registry-server" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.259736 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="44c7f66f-9267-4165-8d51-e8fa7b33b354" containerName="tempest-tests-tempest-tests-runner" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.260818 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.279393 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-c2w4r" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.280318 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.437349 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"856da688-70c4-4b00-9b63-0d93aee55d2b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.437876 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f55hp\" (UniqueName: \"kubernetes.io/projected/856da688-70c4-4b00-9b63-0d93aee55d2b-kube-api-access-f55hp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"856da688-70c4-4b00-9b63-0d93aee55d2b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.540794 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f55hp\" (UniqueName: \"kubernetes.io/projected/856da688-70c4-4b00-9b63-0d93aee55d2b-kube-api-access-f55hp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"856da688-70c4-4b00-9b63-0d93aee55d2b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.541340 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"856da688-70c4-4b00-9b63-0d93aee55d2b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.541781 5045 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"856da688-70c4-4b00-9b63-0d93aee55d2b\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.571190 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"856da688-70c4-4b00-9b63-0d93aee55d2b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.578426 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f55hp\" (UniqueName: \"kubernetes.io/projected/856da688-70c4-4b00-9b63-0d93aee55d2b-kube-api-access-f55hp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"856da688-70c4-4b00-9b63-0d93aee55d2b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:29 crc kubenswrapper[5045]: I1125 23:57:29.605909 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 23:57:30 crc kubenswrapper[5045]: I1125 23:57:30.087466 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 23:57:30 crc kubenswrapper[5045]: I1125 23:57:30.098921 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 23:57:30 crc kubenswrapper[5045]: I1125 23:57:30.654240 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"856da688-70c4-4b00-9b63-0d93aee55d2b","Type":"ContainerStarted","Data":"7ad236ec4bf6496f1798770b7d69eba2050df2cd385726e05ffd63c44dd7d6ce"} Nov 25 23:57:33 crc kubenswrapper[5045]: I1125 23:57:33.700015 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"856da688-70c4-4b00-9b63-0d93aee55d2b","Type":"ContainerStarted","Data":"2a4ecb5fe5205537329821d00c527e22c9b529c2b10215aa39e9886922e50fa7"} Nov 25 23:57:33 crc kubenswrapper[5045]: I1125 23:57:33.717202 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.446087885 podStartE2EDuration="4.717179092s" podCreationTimestamp="2025-11-25 23:57:29 +0000 UTC" firstStartedPulling="2025-11-25 23:57:30.098459647 +0000 UTC m=+3506.456140609" lastFinishedPulling="2025-11-25 23:57:33.369572694 +0000 UTC m=+3509.727231816" observedRunningTime="2025-11-25 23:57:33.716151163 +0000 UTC m=+3510.073810315" watchObservedRunningTime="2025-11-25 23:57:33.717179092 +0000 UTC m=+3510.074838234" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.675157 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ntlcp/must-gather-6cj2k"] Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.677884 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.679680 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-ntlcp"/"default-dockercfg-z9tvp" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.680003 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-ntlcp"/"openshift-service-ca.crt" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.680143 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-ntlcp"/"kube-root-ca.crt" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.685403 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-ntlcp/must-gather-6cj2k"] Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.861445 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c1093fd7-5afc-4afd-be60-04a0885dbf62-must-gather-output\") pod \"must-gather-6cj2k\" (UID: \"c1093fd7-5afc-4afd-be60-04a0885dbf62\") " pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.861617 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd4vp\" (UniqueName: \"kubernetes.io/projected/c1093fd7-5afc-4afd-be60-04a0885dbf62-kube-api-access-jd4vp\") pod \"must-gather-6cj2k\" (UID: \"c1093fd7-5afc-4afd-be60-04a0885dbf62\") " pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.963515 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd4vp\" (UniqueName: \"kubernetes.io/projected/c1093fd7-5afc-4afd-be60-04a0885dbf62-kube-api-access-jd4vp\") pod \"must-gather-6cj2k\" (UID: \"c1093fd7-5afc-4afd-be60-04a0885dbf62\") " pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.963638 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c1093fd7-5afc-4afd-be60-04a0885dbf62-must-gather-output\") pod \"must-gather-6cj2k\" (UID: \"c1093fd7-5afc-4afd-be60-04a0885dbf62\") " pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.964371 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c1093fd7-5afc-4afd-be60-04a0885dbf62-must-gather-output\") pod \"must-gather-6cj2k\" (UID: \"c1093fd7-5afc-4afd-be60-04a0885dbf62\") " pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 25 23:57:56 crc kubenswrapper[5045]: I1125 23:57:56.994419 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd4vp\" (UniqueName: \"kubernetes.io/projected/c1093fd7-5afc-4afd-be60-04a0885dbf62-kube-api-access-jd4vp\") pod \"must-gather-6cj2k\" (UID: \"c1093fd7-5afc-4afd-be60-04a0885dbf62\") " pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 25 23:57:57 crc kubenswrapper[5045]: I1125 23:57:57.002040 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 25 23:57:57 crc kubenswrapper[5045]: I1125 23:57:57.559317 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-ntlcp/must-gather-6cj2k"] Nov 25 23:57:57 crc kubenswrapper[5045]: I1125 23:57:57.966761 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" event={"ID":"c1093fd7-5afc-4afd-be60-04a0885dbf62","Type":"ContainerStarted","Data":"c2127dff302f7a989611410a6f015a71021afa63350f09a585be8ecf6f9ae249"} Nov 25 23:58:05 crc kubenswrapper[5045]: I1125 23:58:05.041129 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" event={"ID":"c1093fd7-5afc-4afd-be60-04a0885dbf62","Type":"ContainerStarted","Data":"8a820af9735619139628ff54fbc2bdaa6e40280d0d9d113343b344096d7c4075"} Nov 25 23:58:05 crc kubenswrapper[5045]: I1125 23:58:05.041996 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" event={"ID":"c1093fd7-5afc-4afd-be60-04a0885dbf62","Type":"ContainerStarted","Data":"e47dbfc15ad8633b5e0c4928a13b4c336077836c25f1206697d496fbebcdc017"} Nov 25 23:58:05 crc kubenswrapper[5045]: I1125 23:58:05.078276 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" podStartSLOduration=2.727059181 podStartE2EDuration="9.07824268s" podCreationTimestamp="2025-11-25 23:57:56 +0000 UTC" firstStartedPulling="2025-11-25 23:57:57.547004519 +0000 UTC m=+3533.904663621" lastFinishedPulling="2025-11-25 23:58:03.898187968 +0000 UTC m=+3540.255847120" observedRunningTime="2025-11-25 23:58:05.062993809 +0000 UTC m=+3541.420652911" watchObservedRunningTime="2025-11-25 23:58:05.07824268 +0000 UTC m=+3541.435901832" Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.660151 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-fr65m"] Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.662931 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.737812 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d43caec2-9ade-410c-ad2a-cc091c999419-host\") pod \"crc-debug-fr65m\" (UID: \"d43caec2-9ade-410c-ad2a-cc091c999419\") " pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.737918 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wz4s9\" (UniqueName: \"kubernetes.io/projected/d43caec2-9ade-410c-ad2a-cc091c999419-kube-api-access-wz4s9\") pod \"crc-debug-fr65m\" (UID: \"d43caec2-9ade-410c-ad2a-cc091c999419\") " pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.839549 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d43caec2-9ade-410c-ad2a-cc091c999419-host\") pod \"crc-debug-fr65m\" (UID: \"d43caec2-9ade-410c-ad2a-cc091c999419\") " pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.839970 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wz4s9\" (UniqueName: \"kubernetes.io/projected/d43caec2-9ade-410c-ad2a-cc091c999419-kube-api-access-wz4s9\") pod \"crc-debug-fr65m\" (UID: \"d43caec2-9ade-410c-ad2a-cc091c999419\") " pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.839884 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d43caec2-9ade-410c-ad2a-cc091c999419-host\") pod \"crc-debug-fr65m\" (UID: \"d43caec2-9ade-410c-ad2a-cc091c999419\") " pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.874555 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wz4s9\" (UniqueName: \"kubernetes.io/projected/d43caec2-9ade-410c-ad2a-cc091c999419-kube-api-access-wz4s9\") pod \"crc-debug-fr65m\" (UID: \"d43caec2-9ade-410c-ad2a-cc091c999419\") " pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:58:08 crc kubenswrapper[5045]: I1125 23:58:08.982603 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:58:09 crc kubenswrapper[5045]: I1125 23:58:09.072043 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/crc-debug-fr65m" event={"ID":"d43caec2-9ade-410c-ad2a-cc091c999419","Type":"ContainerStarted","Data":"d11251db49ee19fd23ba4f5bdbe508d2a1a1e1deb26e0300c219c1faf868461a"} Nov 25 23:58:22 crc kubenswrapper[5045]: I1125 23:58:22.220989 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/crc-debug-fr65m" event={"ID":"d43caec2-9ade-410c-ad2a-cc091c999419","Type":"ContainerStarted","Data":"719915d315ea6629a74b4420ead80c1257b31628d3acd9eb1c9d8adf8fbf1133"} Nov 25 23:58:22 crc kubenswrapper[5045]: I1125 23:58:22.233532 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-ntlcp/crc-debug-fr65m" podStartSLOduration=1.963933677 podStartE2EDuration="14.233518632s" podCreationTimestamp="2025-11-25 23:58:08 +0000 UTC" firstStartedPulling="2025-11-25 23:58:09.022967964 +0000 UTC m=+3545.380627086" lastFinishedPulling="2025-11-25 23:58:21.292552929 +0000 UTC m=+3557.650212041" observedRunningTime="2025-11-25 23:58:22.233215574 +0000 UTC m=+3558.590874696" watchObservedRunningTime="2025-11-25 23:58:22.233518632 +0000 UTC m=+3558.591177744" Nov 25 23:58:22 crc kubenswrapper[5045]: I1125 23:58:22.244140 5045 scope.go:117] "RemoveContainer" containerID="4bc90aab59bab006e0e91721fa33266bd7bc180e58d9064bcaba652e19229bf7" Nov 25 23:58:22 crc kubenswrapper[5045]: I1125 23:58:22.276501 5045 scope.go:117] "RemoveContainer" containerID="fa69b193b322c2ccf0effabaaa4f20eee95630955b8e30737e525ef2ecfcf70c" Nov 25 23:58:22 crc kubenswrapper[5045]: I1125 23:58:22.322951 5045 scope.go:117] "RemoveContainer" containerID="ea250ff29b0a9db2b33e04f1bbd50d537cd262c4c088966f5994f8f0c3964815" Nov 25 23:58:22 crc kubenswrapper[5045]: I1125 23:58:22.353696 5045 scope.go:117] "RemoveContainer" containerID="95386e79d050ad4cd4198148ca1757834d7a7f2a29a32fd13203389bf0a36e02" Nov 25 23:59:00 crc kubenswrapper[5045]: I1125 23:59:00.540933 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:59:00 crc kubenswrapper[5045]: I1125 23:59:00.541551 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:59:28 crc kubenswrapper[5045]: I1125 23:59:28.896306 5045 generic.go:334] "Generic (PLEG): container finished" podID="d43caec2-9ade-410c-ad2a-cc091c999419" containerID="719915d315ea6629a74b4420ead80c1257b31628d3acd9eb1c9d8adf8fbf1133" exitCode=0 Nov 25 23:59:28 crc kubenswrapper[5045]: I1125 23:59:28.896366 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/crc-debug-fr65m" event={"ID":"d43caec2-9ade-410c-ad2a-cc091c999419","Type":"ContainerDied","Data":"719915d315ea6629a74b4420ead80c1257b31628d3acd9eb1c9d8adf8fbf1133"} Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.029335 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.067198 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-fr65m"] Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.074580 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-fr65m"] Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.103006 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wz4s9\" (UniqueName: \"kubernetes.io/projected/d43caec2-9ade-410c-ad2a-cc091c999419-kube-api-access-wz4s9\") pod \"d43caec2-9ade-410c-ad2a-cc091c999419\" (UID: \"d43caec2-9ade-410c-ad2a-cc091c999419\") " Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.103054 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d43caec2-9ade-410c-ad2a-cc091c999419-host\") pod \"d43caec2-9ade-410c-ad2a-cc091c999419\" (UID: \"d43caec2-9ade-410c-ad2a-cc091c999419\") " Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.103562 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d43caec2-9ade-410c-ad2a-cc091c999419-host" (OuterVolumeSpecName: "host") pod "d43caec2-9ade-410c-ad2a-cc091c999419" (UID: "d43caec2-9ade-410c-ad2a-cc091c999419"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.108097 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d43caec2-9ade-410c-ad2a-cc091c999419-kube-api-access-wz4s9" (OuterVolumeSpecName: "kube-api-access-wz4s9") pod "d43caec2-9ade-410c-ad2a-cc091c999419" (UID: "d43caec2-9ade-410c-ad2a-cc091c999419"). InnerVolumeSpecName "kube-api-access-wz4s9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.205598 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wz4s9\" (UniqueName: \"kubernetes.io/projected/d43caec2-9ade-410c-ad2a-cc091c999419-kube-api-access-wz4s9\") on node \"crc\" DevicePath \"\"" Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.205655 5045 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d43caec2-9ade-410c-ad2a-cc091c999419-host\") on node \"crc\" DevicePath \"\"" Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.410486 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d43caec2-9ade-410c-ad2a-cc091c999419" path="/var/lib/kubelet/pods/d43caec2-9ade-410c-ad2a-cc091c999419/volumes" Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.540681 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.540779 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.916288 5045 scope.go:117] "RemoveContainer" containerID="719915d315ea6629a74b4420ead80c1257b31628d3acd9eb1c9d8adf8fbf1133" Nov 25 23:59:30 crc kubenswrapper[5045]: I1125 23:59:30.916381 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-fr65m" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.264538 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-lbzwv"] Nov 25 23:59:31 crc kubenswrapper[5045]: E1125 23:59:31.264902 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d43caec2-9ade-410c-ad2a-cc091c999419" containerName="container-00" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.264913 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="d43caec2-9ade-410c-ad2a-cc091c999419" containerName="container-00" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.265084 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="d43caec2-9ade-410c-ad2a-cc091c999419" containerName="container-00" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.265644 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.428904 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a765368-6a5c-4e6e-837b-38ff427d1ca7-host\") pod \"crc-debug-lbzwv\" (UID: \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\") " pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.429606 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8frz\" (UniqueName: \"kubernetes.io/projected/0a765368-6a5c-4e6e-837b-38ff427d1ca7-kube-api-access-t8frz\") pod \"crc-debug-lbzwv\" (UID: \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\") " pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.531271 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8frz\" (UniqueName: \"kubernetes.io/projected/0a765368-6a5c-4e6e-837b-38ff427d1ca7-kube-api-access-t8frz\") pod \"crc-debug-lbzwv\" (UID: \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\") " pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.531499 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a765368-6a5c-4e6e-837b-38ff427d1ca7-host\") pod \"crc-debug-lbzwv\" (UID: \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\") " pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.531607 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a765368-6a5c-4e6e-837b-38ff427d1ca7-host\") pod \"crc-debug-lbzwv\" (UID: \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\") " pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.567500 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8frz\" (UniqueName: \"kubernetes.io/projected/0a765368-6a5c-4e6e-837b-38ff427d1ca7-kube-api-access-t8frz\") pod \"crc-debug-lbzwv\" (UID: \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\") " pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.600651 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:31 crc kubenswrapper[5045]: I1125 23:59:31.929872 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" event={"ID":"0a765368-6a5c-4e6e-837b-38ff427d1ca7","Type":"ContainerStarted","Data":"8259e13afd819db82e829ab635ba59994952568e054aa848c160e5c4232399fe"} Nov 25 23:59:32 crc kubenswrapper[5045]: I1125 23:59:32.948846 5045 generic.go:334] "Generic (PLEG): container finished" podID="0a765368-6a5c-4e6e-837b-38ff427d1ca7" containerID="5e914b058ce387d7363cd23bf15d799cc923617a9e527bd1a223353b0606e0fa" exitCode=0 Nov 25 23:59:32 crc kubenswrapper[5045]: I1125 23:59:32.948969 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" event={"ID":"0a765368-6a5c-4e6e-837b-38ff427d1ca7","Type":"ContainerDied","Data":"5e914b058ce387d7363cd23bf15d799cc923617a9e527bd1a223353b0606e0fa"} Nov 25 23:59:33 crc kubenswrapper[5045]: I1125 23:59:33.541669 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-lbzwv"] Nov 25 23:59:33 crc kubenswrapper[5045]: I1125 23:59:33.551003 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-lbzwv"] Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.060493 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.180687 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a765368-6a5c-4e6e-837b-38ff427d1ca7-host\") pod \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\" (UID: \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\") " Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.180889 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a765368-6a5c-4e6e-837b-38ff427d1ca7-host" (OuterVolumeSpecName: "host") pod "0a765368-6a5c-4e6e-837b-38ff427d1ca7" (UID: "0a765368-6a5c-4e6e-837b-38ff427d1ca7"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.180931 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8frz\" (UniqueName: \"kubernetes.io/projected/0a765368-6a5c-4e6e-837b-38ff427d1ca7-kube-api-access-t8frz\") pod \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\" (UID: \"0a765368-6a5c-4e6e-837b-38ff427d1ca7\") " Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.181683 5045 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a765368-6a5c-4e6e-837b-38ff427d1ca7-host\") on node \"crc\" DevicePath \"\"" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.187405 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a765368-6a5c-4e6e-837b-38ff427d1ca7-kube-api-access-t8frz" (OuterVolumeSpecName: "kube-api-access-t8frz") pod "0a765368-6a5c-4e6e-837b-38ff427d1ca7" (UID: "0a765368-6a5c-4e6e-837b-38ff427d1ca7"). InnerVolumeSpecName "kube-api-access-t8frz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.284063 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8frz\" (UniqueName: \"kubernetes.io/projected/0a765368-6a5c-4e6e-837b-38ff427d1ca7-kube-api-access-t8frz\") on node \"crc\" DevicePath \"\"" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.422265 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a765368-6a5c-4e6e-837b-38ff427d1ca7" path="/var/lib/kubelet/pods/0a765368-6a5c-4e6e-837b-38ff427d1ca7/volumes" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.774399 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-w8tsg"] Nov 25 23:59:34 crc kubenswrapper[5045]: E1125 23:59:34.774828 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a765368-6a5c-4e6e-837b-38ff427d1ca7" containerName="container-00" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.774847 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a765368-6a5c-4e6e-837b-38ff427d1ca7" containerName="container-00" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.775042 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a765368-6a5c-4e6e-837b-38ff427d1ca7" containerName="container-00" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.775731 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.899827 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7bjl\" (UniqueName: \"kubernetes.io/projected/3581eb74-d59e-4bb9-a52b-5280570897b1-kube-api-access-j7bjl\") pod \"crc-debug-w8tsg\" (UID: \"3581eb74-d59e-4bb9-a52b-5280570897b1\") " pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.900974 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3581eb74-d59e-4bb9-a52b-5280570897b1-host\") pod \"crc-debug-w8tsg\" (UID: \"3581eb74-d59e-4bb9-a52b-5280570897b1\") " pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.968018 5045 scope.go:117] "RemoveContainer" containerID="5e914b058ce387d7363cd23bf15d799cc923617a9e527bd1a223353b0606e0fa" Nov 25 23:59:34 crc kubenswrapper[5045]: I1125 23:59:34.968099 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-lbzwv" Nov 25 23:59:35 crc kubenswrapper[5045]: I1125 23:59:35.003364 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3581eb74-d59e-4bb9-a52b-5280570897b1-host\") pod \"crc-debug-w8tsg\" (UID: \"3581eb74-d59e-4bb9-a52b-5280570897b1\") " pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:35 crc kubenswrapper[5045]: I1125 23:59:35.003461 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7bjl\" (UniqueName: \"kubernetes.io/projected/3581eb74-d59e-4bb9-a52b-5280570897b1-kube-api-access-j7bjl\") pod \"crc-debug-w8tsg\" (UID: \"3581eb74-d59e-4bb9-a52b-5280570897b1\") " pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:35 crc kubenswrapper[5045]: I1125 23:59:35.003539 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3581eb74-d59e-4bb9-a52b-5280570897b1-host\") pod \"crc-debug-w8tsg\" (UID: \"3581eb74-d59e-4bb9-a52b-5280570897b1\") " pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:35 crc kubenswrapper[5045]: I1125 23:59:35.025705 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7bjl\" (UniqueName: \"kubernetes.io/projected/3581eb74-d59e-4bb9-a52b-5280570897b1-kube-api-access-j7bjl\") pod \"crc-debug-w8tsg\" (UID: \"3581eb74-d59e-4bb9-a52b-5280570897b1\") " pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:35 crc kubenswrapper[5045]: I1125 23:59:35.094634 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:35 crc kubenswrapper[5045]: W1125 23:59:35.125285 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3581eb74_d59e_4bb9_a52b_5280570897b1.slice/crio-10b7c45a825e022e5456deeaffa7ed4ee8278a615a64942fbda663de6bbb1782 WatchSource:0}: Error finding container 10b7c45a825e022e5456deeaffa7ed4ee8278a615a64942fbda663de6bbb1782: Status 404 returned error can't find the container with id 10b7c45a825e022e5456deeaffa7ed4ee8278a615a64942fbda663de6bbb1782 Nov 25 23:59:36 crc kubenswrapper[5045]: I1125 23:59:36.000422 5045 generic.go:334] "Generic (PLEG): container finished" podID="3581eb74-d59e-4bb9-a52b-5280570897b1" containerID="1124e0da1a1cf4940407c486efd44f72477fc0a7e8e75d6fb522e124f4317a76" exitCode=0 Nov 25 23:59:36 crc kubenswrapper[5045]: I1125 23:59:36.000768 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" event={"ID":"3581eb74-d59e-4bb9-a52b-5280570897b1","Type":"ContainerDied","Data":"1124e0da1a1cf4940407c486efd44f72477fc0a7e8e75d6fb522e124f4317a76"} Nov 25 23:59:36 crc kubenswrapper[5045]: I1125 23:59:36.000947 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" event={"ID":"3581eb74-d59e-4bb9-a52b-5280570897b1","Type":"ContainerStarted","Data":"10b7c45a825e022e5456deeaffa7ed4ee8278a615a64942fbda663de6bbb1782"} Nov 25 23:59:36 crc kubenswrapper[5045]: I1125 23:59:36.061473 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-w8tsg"] Nov 25 23:59:36 crc kubenswrapper[5045]: I1125 23:59:36.070398 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ntlcp/crc-debug-w8tsg"] Nov 25 23:59:37 crc 
kubenswrapper[5045]: I1125 23:59:37.129033 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:37 crc kubenswrapper[5045]: I1125 23:59:37.248361 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7bjl\" (UniqueName: \"kubernetes.io/projected/3581eb74-d59e-4bb9-a52b-5280570897b1-kube-api-access-j7bjl\") pod \"3581eb74-d59e-4bb9-a52b-5280570897b1\" (UID: \"3581eb74-d59e-4bb9-a52b-5280570897b1\") " Nov 25 23:59:37 crc kubenswrapper[5045]: I1125 23:59:37.248563 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3581eb74-d59e-4bb9-a52b-5280570897b1-host\") pod \"3581eb74-d59e-4bb9-a52b-5280570897b1\" (UID: \"3581eb74-d59e-4bb9-a52b-5280570897b1\") " Nov 25 23:59:37 crc kubenswrapper[5045]: I1125 23:59:37.248668 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3581eb74-d59e-4bb9-a52b-5280570897b1-host" (OuterVolumeSpecName: "host") pod "3581eb74-d59e-4bb9-a52b-5280570897b1" (UID: "3581eb74-d59e-4bb9-a52b-5280570897b1"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 23:59:37 crc kubenswrapper[5045]: I1125 23:59:37.249084 5045 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3581eb74-d59e-4bb9-a52b-5280570897b1-host\") on node \"crc\" DevicePath \"\"" Nov 25 23:59:37 crc kubenswrapper[5045]: I1125 23:59:37.254080 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3581eb74-d59e-4bb9-a52b-5280570897b1-kube-api-access-j7bjl" (OuterVolumeSpecName: "kube-api-access-j7bjl") pod "3581eb74-d59e-4bb9-a52b-5280570897b1" (UID: "3581eb74-d59e-4bb9-a52b-5280570897b1"). InnerVolumeSpecName "kube-api-access-j7bjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 23:59:37 crc kubenswrapper[5045]: I1125 23:59:37.351139 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7bjl\" (UniqueName: \"kubernetes.io/projected/3581eb74-d59e-4bb9-a52b-5280570897b1-kube-api-access-j7bjl\") on node \"crc\" DevicePath \"\"" Nov 25 23:59:38 crc kubenswrapper[5045]: I1125 23:59:38.019583 5045 scope.go:117] "RemoveContainer" containerID="1124e0da1a1cf4940407c486efd44f72477fc0a7e8e75d6fb522e124f4317a76" Nov 25 23:59:38 crc kubenswrapper[5045]: I1125 23:59:38.019640 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/crc-debug-w8tsg" Nov 25 23:59:38 crc kubenswrapper[5045]: I1125 23:59:38.406635 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3581eb74-d59e-4bb9-a52b-5280570897b1" path="/var/lib/kubelet/pods/3581eb74-d59e-4bb9-a52b-5280570897b1/volumes" Nov 25 23:59:52 crc kubenswrapper[5045]: I1125 23:59:52.733428 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6477c784d-pjbws_4cf1306a-a479-4be9-9d81-a24e584294a5/barbican-api/0.log" Nov 25 23:59:52 crc kubenswrapper[5045]: I1125 23:59:52.786461 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6477c784d-pjbws_4cf1306a-a479-4be9-9d81-a24e584294a5/barbican-api-log/0.log" Nov 25 23:59:52 crc kubenswrapper[5045]: I1125 23:59:52.915380 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-f86f47df6-cktpw_930a6fb5-dcf0-455c-97a7-5446766b0d01/barbican-keystone-listener/0.log" Nov 25 23:59:52 crc kubenswrapper[5045]: I1125 23:59:52.964963 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-f86f47df6-cktpw_930a6fb5-dcf0-455c-97a7-5446766b0d01/barbican-keystone-listener-log/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.097901 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-679bb9cf9-plnhs_b77ab75f-32f2-4664-a48e-76699f609a7b/barbican-worker/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.156630 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-679bb9cf9-plnhs_b77ab75f-32f2-4664-a48e-76699f609a7b/barbican-worker-log/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.255217 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv_7301c52a-3ce7-478e-867e-6f458de32f19/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.359161 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3d111259-fd01-46df-9e3b-e25c7a05f59d/ceilometer-central-agent/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.410704 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3d111259-fd01-46df-9e3b-e25c7a05f59d/ceilometer-notification-agent/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.442401 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3d111259-fd01-46df-9e3b-e25c7a05f59d/proxy-httpd/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.503443 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3d111259-fd01-46df-9e3b-e25c7a05f59d/sg-core/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.565537 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-52524_a98dde15-bff6-4ed8-8216-142372401818/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.701100 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5_8e0f7b11-159a-4941-9abe-03adde83a57c/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.821090 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cinder-api-0_945c5d6c-f96a-4d6c-a78d-795e26a25699/cinder-api-log/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.845061 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_945c5d6c-f96a-4d6c-a78d-795e26a25699/cinder-api/0.log" Nov 25 23:59:53 crc kubenswrapper[5045]: I1125 23:59:53.992195 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_d86361ad-5146-475b-b0d4-c505b002904b/probe/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.150999 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_d86361ad-5146-475b-b0d4-c505b002904b/cinder-backup/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.209379 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_efac58ce-5053-4a60-bcd4-41b7c1f483f2/cinder-scheduler/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.220807 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_efac58ce-5053-4a60-bcd4-41b7c1f483f2/probe/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.429493 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_3dcd1a1a-085e-472a-8dab-788bba3c3ce4/cinder-volume/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.439608 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_3dcd1a1a-085e-472a-8dab-788bba3c3ce4/probe/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.499546 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb_c9631995-c169-41d0-90cb-9d1566919f23/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.633805 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h_6ffaaa5a-fd54-4e1a-8591-6991152aa8de/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.718222 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-4xxtm_1fefc039-452f-4d2f-a4f8-2e73833e05f6/init/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.853480 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-4xxtm_1fefc039-452f-4d2f-a4f8-2e73833e05f6/init/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.909420 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-4xxtm_1fefc039-452f-4d2f-a4f8-2e73833e05f6/dnsmasq-dns/0.log" Nov 25 23:59:54 crc kubenswrapper[5045]: I1125 23:59:54.941866 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_bf6cc533-9827-4132-9d84-50fe49efef41/glance-httpd/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.051682 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_bf6cc533-9827-4132-9d84-50fe49efef41/glance-log/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.123249 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_c46cc706-d7b4-4d2a-b75e-a8bed8a125eb/glance-log/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.133808 5045 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_c46cc706-d7b4-4d2a-b75e-a8bed8a125eb/glance-httpd/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.399838 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-88c75b4b-7vjc8_a78ee35a-ac96-40b7-b9aa-92bdaadf339b/horizon/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.443379 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-88c75b4b-7vjc8_a78ee35a-ac96-40b7-b9aa-92bdaadf339b/horizon-log/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.520557 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-knvng_4dcec4cb-07ca-4c0a-afaa-672e534cf521/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.658341 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-lzmxw_f0795a52-0ba0-497f-a55c-8888a54c0fa8/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.865567 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2/kube-state-metrics/0.log" Nov 25 23:59:55 crc kubenswrapper[5045]: I1125 23:59:55.875748 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-68c57f7894-dq5jz_e85b05a0-5d74-4df9-b09c-a68596f45b6e/keystone-api/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.078165 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5_886ae822-5a4e-4578-a137-1322687c1a77/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.135762 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-2c9f-account-create-update-zpp6n_016700b4-9818-4d96-be4d-d6b07316b91f/mariadb-account-create-update/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.284956 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_0a1b4a8a-456e-4756-8321-079731f5f729/manila-api-log/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.373350 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-db-create-2t5tj_c091d493-4263-41da-a276-6dc859d7d5e1/mariadb-database-create/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.410464 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_0a1b4a8a-456e-4756-8321-079731f5f729/manila-api/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.508476 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-db-sync-fjsxq_8b764d3f-bf9c-4407-b5d5-6f2834714d50/manila-db-sync/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.612942 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_3b78dcef-2652-44a3-8d97-cb40f963d225/probe/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.699890 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_3b78dcef-2652-44a3-8d97-cb40f963d225/manila-scheduler/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.754892 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_manila-share-share1-0_ce03c0a7-a5a4-48df-8d94-5d3c7464efc1/manila-share/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.811850 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_ce03c0a7-a5a4-48df-8d94-5d3c7464efc1/probe/0.log" Nov 25 23:59:56 crc kubenswrapper[5045]: I1125 23:59:56.951043 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5db8cdc695-2sz2g_6970371c-e072-49ea-97b5-a6bed28d5372/neutron-api/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.088996 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5db8cdc695-2sz2g_6970371c-e072-49ea-97b5-a6bed28d5372/neutron-httpd/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.240853 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6_1f6014b1-5500-49d6-a729-dfb677b8a1cc/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.399494 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_95d30530-9d52-442a-94e0-3e85871f0c4f/memcached/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.490611 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_65d3b56d-211c-4392-b6ee-449e68d546a0/nova-api-log/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.610837 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_98d79db2-7dcf-4b5a-8d2b-bd1a799b843f/nova-cell0-conductor-conductor/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.673729 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_65d3b56d-211c-4392-b6ee-449e68d546a0/nova-api-api/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.749804 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_cf45160d-7c2c-4d8c-81da-db68ed300d2d/nova-cell1-conductor-conductor/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.868058 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_4a276597-ed77-4cd6-95b0-57e64be23060/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 23:59:57 crc kubenswrapper[5045]: I1125 23:59:57.987563 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk_2a2c3598-f1cb-4b09-8410-48442361a88a/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.008859 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4af60a2e-c5d2-4f99-912d-8c269561a2e0/nova-metadata-log/0.log" Nov 25 23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.280534 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_abaa26fa-f1a8-4249-8179-ad1b64334be5/mysql-bootstrap/0.log" Nov 25 23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.395406 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_4fc80e12-1f82-458d-a8c1-4e7625a9381c/nova-scheduler-scheduler/0.log" Nov 25 23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.469774 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_abaa26fa-f1a8-4249-8179-ad1b64334be5/mysql-bootstrap/0.log" Nov 25 
23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.507364 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_abaa26fa-f1a8-4249-8179-ad1b64334be5/galera/0.log" Nov 25 23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.786676 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4af60a2e-c5d2-4f99-912d-8c269561a2e0/nova-metadata-metadata/0.log" Nov 25 23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.810300 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_981d9260-fe05-4c33-9c46-c65a7a31c7b1/mysql-bootstrap/0.log" Nov 25 23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.959569 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_981d9260-fe05-4c33-9c46-c65a7a31c7b1/mysql-bootstrap/0.log" Nov 25 23:59:58 crc kubenswrapper[5045]: I1125 23:59:58.993537 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_7909b434-b7ea-46af-8c4e-b5454df0ba0f/openstackclient/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.000911 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_981d9260-fe05-4c33-9c46-c65a7a31c7b1/galera/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.187581 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wrcbr_a5ad3a64-612a-442a-beed-2dcf6303b974/openstack-network-exporter/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.200534 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-kfwsl_efb42386-0a5d-423f-b31e-13e9433271ba/ovn-controller/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.312503 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-z2vw6_1765d304-e95f-43d5-9655-84e468fe332e/ovsdb-server-init/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.495221 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-z2vw6_1765d304-e95f-43d5-9655-84e468fe332e/ovsdb-server-init/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.503691 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-z2vw6_1765d304-e95f-43d5-9655-84e468fe332e/ovsdb-server/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.505098 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-z2vw6_1765d304-e95f-43d5-9655-84e468fe332e/ovs-vswitchd/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.554627 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-2p27n_9951b664-25eb-49a9-ba49-6bd594f857df/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.699102 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f7cd8ff8-57e1-4909-b7c7-93f707770aaa/ovn-northd/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.699355 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f7cd8ff8-57e1-4909-b7c7-93f707770aaa/openstack-network-exporter/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.757041 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3f0b6c93-84c1-4b4a-8d1d-844d035fe867/openstack-network-exporter/0.log" Nov 25 23:59:59 
crc kubenswrapper[5045]: I1125 23:59:59.861926 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3f0b6c93-84c1-4b4a-8d1d-844d035fe867/ovsdbserver-nb/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.891358 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_dca2a357-2bb3-4400-a74b-5ec428e7a710/openstack-network-exporter/0.log" Nov 25 23:59:59 crc kubenswrapper[5045]: I1125 23:59:59.936019 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_dca2a357-2bb3-4400-a74b-5ec428e7a710/ovsdbserver-sb/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.036849 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-65cddd5cf6-fpz78_3a86c07d-787b-4255-8861-e1c03bc78303/placement-api/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.130360 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_74ac97d4-b89e-47c2-b249-9e70da06d165/setup-container/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.151008 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-65cddd5cf6-fpz78_3a86c07d-787b-4255-8861-e1c03bc78303/placement-log/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.205036 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-pruner-29401920-mp99l"] Nov 26 00:00:00 crc kubenswrapper[5045]: E1126 00:00:00.205622 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3581eb74-d59e-4bb9-a52b-5280570897b1" containerName="container-00" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.205645 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3581eb74-d59e-4bb9-a52b-5280570897b1" containerName="container-00" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.205861 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3581eb74-d59e-4bb9-a52b-5280570897b1" containerName="container-00" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.206543 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.208742 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"serviceca" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.208933 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"pruner-dockercfg-p7bcw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.216921 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs"] Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.218439 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.222013 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.222208 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.226561 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-purge-29401920-qtzfs"] Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.228005 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.236535 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.251439 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-purge-29401920-5drnw"] Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.254419 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.261348 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-purge-29401920-qtzfs"] Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.269529 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.276516 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29401920-mp99l"] Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.294347 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f26f45b5-c987-4eba-9593-98007bd1ffe0-secret-volume\") pod \"collect-profiles-29401920-67qfs\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.294636 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7hgb\" (UniqueName: \"kubernetes.io/projected/f26f45b5-c987-4eba-9593-98007bd1ffe0-kube-api-access-r7hgb\") pod \"collect-profiles-29401920-67qfs\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.294665 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5txw9\" (UniqueName: \"kubernetes.io/projected/e1a924fe-809e-4cd2-9e0a-1859228aa009-kube-api-access-5txw9\") pod \"image-pruner-29401920-mp99l\" (UID: \"e1a924fe-809e-4cd2-9e0a-1859228aa009\") " pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.295117 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f26f45b5-c987-4eba-9593-98007bd1ffe0-config-volume\") pod \"collect-profiles-29401920-67qfs\" 
(UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.295247 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e1a924fe-809e-4cd2-9e0a-1859228aa009-serviceca\") pod \"image-pruner-29401920-mp99l\" (UID: \"e1a924fe-809e-4cd2-9e0a-1859228aa009\") " pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.297580 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs"] Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.313699 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-purge-29401920-5drnw"] Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397122 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e1a924fe-809e-4cd2-9e0a-1859228aa009-serviceca\") pod \"image-pruner-29401920-mp99l\" (UID: \"e1a924fe-809e-4cd2-9e0a-1859228aa009\") " pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397169 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-scripts\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397205 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmp7z\" (UniqueName: \"kubernetes.io/projected/ce1d9883-706a-4df4-82e8-255c03424e7a-kube-api-access-zmp7z\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397233 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-config-data\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397269 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqq9w\" (UniqueName: \"kubernetes.io/projected/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-kube-api-access-sqq9w\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397290 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-scripts\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397315 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/f26f45b5-c987-4eba-9593-98007bd1ffe0-secret-volume\") pod \"collect-profiles-29401920-67qfs\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397337 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7hgb\" (UniqueName: \"kubernetes.io/projected/f26f45b5-c987-4eba-9593-98007bd1ffe0-kube-api-access-r7hgb\") pod \"collect-profiles-29401920-67qfs\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397353 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5txw9\" (UniqueName: \"kubernetes.io/projected/e1a924fe-809e-4cd2-9e0a-1859228aa009-kube-api-access-5txw9\") pod \"image-pruner-29401920-mp99l\" (UID: \"e1a924fe-809e-4cd2-9e0a-1859228aa009\") " pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397389 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-config-data\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397421 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-combined-ca-bundle\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397465 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f26f45b5-c987-4eba-9593-98007bd1ffe0-config-volume\") pod \"collect-profiles-29401920-67qfs\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.397511 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-combined-ca-bundle\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.398392 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e1a924fe-809e-4cd2-9e0a-1859228aa009-serviceca\") pod \"image-pruner-29401920-mp99l\" (UID: \"e1a924fe-809e-4cd2-9e0a-1859228aa009\") " pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.399152 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f26f45b5-c987-4eba-9593-98007bd1ffe0-config-volume\") pod \"collect-profiles-29401920-67qfs\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.412872 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f26f45b5-c987-4eba-9593-98007bd1ffe0-secret-volume\") pod \"collect-profiles-29401920-67qfs\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.414395 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5txw9\" (UniqueName: \"kubernetes.io/projected/e1a924fe-809e-4cd2-9e0a-1859228aa009-kube-api-access-5txw9\") pod \"image-pruner-29401920-mp99l\" (UID: \"e1a924fe-809e-4cd2-9e0a-1859228aa009\") " pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.416063 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7hgb\" (UniqueName: \"kubernetes.io/projected/f26f45b5-c987-4eba-9593-98007bd1ffe0-kube-api-access-r7hgb\") pod \"collect-profiles-29401920-67qfs\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.429967 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_74ac97d4-b89e-47c2-b249-9e70da06d165/setup-container/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.443425 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_73cc82f8-4d6b-4608-9881-664a8194fc6f/setup-container/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.495663 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_74ac97d4-b89e-47c2-b249-9e70da06d165/rabbitmq/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.500919 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-scripts\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.500983 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmp7z\" (UniqueName: \"kubernetes.io/projected/ce1d9883-706a-4df4-82e8-255c03424e7a-kube-api-access-zmp7z\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.501019 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-config-data\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.501063 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqq9w\" (UniqueName: \"kubernetes.io/projected/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-kube-api-access-sqq9w\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " 
pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.501083 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-scripts\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.501120 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-config-data\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.501146 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-combined-ca-bundle\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.501198 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-combined-ca-bundle\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.507338 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-scripts\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.510794 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-config-data\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.511391 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-config-data\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.511905 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-combined-ca-bundle\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.513310 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-combined-ca-bundle\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " 
pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.514247 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-scripts\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.530322 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmp7z\" (UniqueName: \"kubernetes.io/projected/ce1d9883-706a-4df4-82e8-255c03424e7a-kube-api-access-zmp7z\") pod \"nova-cell0-db-purge-29401920-5drnw\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.530928 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqq9w\" (UniqueName: \"kubernetes.io/projected/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-kube-api-access-sqq9w\") pod \"nova-cell1-db-purge-29401920-qtzfs\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.540546 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.540600 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.540642 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.541269 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.541322 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" gracePeriod=600 Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.578025 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.592541 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.613801 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.641293 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.683728 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_73cc82f8-4d6b-4608-9881-664a8194fc6f/rabbitmq/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: E1126 00:00:00.684521 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.720895 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_73cc82f8-4d6b-4608-9881-664a8194fc6f/setup-container/0.log" Nov 26 00:00:00 crc kubenswrapper[5045]: I1126 00:00:00.772763 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc_b7c3894b-8400-492c-b4b9-b45ec555cc68/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.028155 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-fbvf9_96f74290-34d3-41c3-8088-09e598444e2e/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.132188 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr_06f796e9-76e4-4067-915d-4efd6e38226a/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.150191 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-cpmnq_7747bcca-12c7-4cf7-82e3-2a554f853dce/ssh-known-hosts-edpm-deployment/0.log" Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.154161 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29401920-mp99l"] Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.248631 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-purge-29401920-5drnw"] Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.269014 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29401920-mp99l" event={"ID":"e1a924fe-809e-4cd2-9e0a-1859228aa009","Type":"ContainerStarted","Data":"23257237974c31f6408187cfbd3355e0bb8284a733283edf98a70345e1fc3d97"} Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.277147 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" exitCode=0 Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.277267 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b"} Nov 26 00:00:01 crc 
kubenswrapper[5045]: I1126 00:00:01.277301 5045 scope.go:117] "RemoveContainer" containerID="8a05b3b735e54a7242e242635842fa11266f50eddb0a08bb26e8707c81df9b5b" Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.280200 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:00:01 crc kubenswrapper[5045]: E1126 00:00:01.281135 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.289829 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-purge-29401920-5drnw" event={"ID":"ce1d9883-706a-4df4-82e8-255c03424e7a","Type":"ContainerStarted","Data":"e1a550fec889eaa0c828454257b1039028629089ec2401c8c05691481462d7fe"} Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.306099 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_44c7f66f-9267-4165-8d51-e8fa7b33b354/tempest-tests-tempest-tests-runner/0.log" Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.360176 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-purge-29401920-qtzfs"] Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.380163 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs"] Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.383909 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_856da688-70c4-4b00-9b63-0d93aee55d2b/test-operator-logs-container/0.log" Nov 26 00:00:01 crc kubenswrapper[5045]: I1126 00:00:01.587903 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz_8e685995-6390-45d1-948f-9aa20cef1060/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.300200 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29401920-mp99l" event={"ID":"e1a924fe-809e-4cd2-9e0a-1859228aa009","Type":"ContainerStarted","Data":"0965514e51c7552edb397523e24c0da2d9a2e5e7117204704ae83e4565ea143d"} Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.310311 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-purge-29401920-5drnw" event={"ID":"ce1d9883-706a-4df4-82e8-255c03424e7a","Type":"ContainerStarted","Data":"274ad047fb1de4299daf8df5d9da893556b5d5519095d547b8ca3c0a64b621a8"} Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.312443 5045 generic.go:334] "Generic (PLEG): container finished" podID="f26f45b5-c987-4eba-9593-98007bd1ffe0" containerID="79678aa5ebf463af64c1f703d3b9c14200bcafd343cad9ff13b8ec7445a83cb8" exitCode=0 Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.312526 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" 
event={"ID":"f26f45b5-c987-4eba-9593-98007bd1ffe0","Type":"ContainerDied","Data":"79678aa5ebf463af64c1f703d3b9c14200bcafd343cad9ff13b8ec7445a83cb8"} Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.312560 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" event={"ID":"f26f45b5-c987-4eba-9593-98007bd1ffe0","Type":"ContainerStarted","Data":"02a8f00c5ea9c3759bf59409e3c8cb4017a97e235663e9b9972d8b0740d2c5d7"} Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.319519 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" event={"ID":"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1","Type":"ContainerStarted","Data":"46840a79b0020edcb223a1abeb36024246d5fc7f9dd94d42067101ba5e3be1f4"} Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.319568 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" event={"ID":"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1","Type":"ContainerStarted","Data":"3122f03491ad21f173cbb5b09a00a28ed2bb4cf5f9895f3b403378035e5ca58c"} Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.323018 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-pruner-29401920-mp99l" podStartSLOduration=2.323000993 podStartE2EDuration="2.323000993s" podCreationTimestamp="2025-11-26 00:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:00:02.315368979 +0000 UTC m=+3658.673028111" watchObservedRunningTime="2025-11-26 00:00:02.323000993 +0000 UTC m=+3658.680660105" Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.336731 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-purge-29401920-5drnw" podStartSLOduration=2.336698117 podStartE2EDuration="2.336698117s" podCreationTimestamp="2025-11-26 00:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:00:02.329500995 +0000 UTC m=+3658.687160107" watchObservedRunningTime="2025-11-26 00:00:02.336698117 +0000 UTC m=+3658.694357249" Nov 26 00:00:02 crc kubenswrapper[5045]: I1126 00:00:02.380930 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" podStartSLOduration=2.380913836 podStartE2EDuration="2.380913836s" podCreationTimestamp="2025-11-26 00:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:00:02.37177604 +0000 UTC m=+3658.729435172" watchObservedRunningTime="2025-11-26 00:00:02.380913836 +0000 UTC m=+3658.738572948" Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.331330 5045 generic.go:334] "Generic (PLEG): container finished" podID="e1a924fe-809e-4cd2-9e0a-1859228aa009" containerID="0965514e51c7552edb397523e24c0da2d9a2e5e7117204704ae83e4565ea143d" exitCode=0 Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.331381 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29401920-mp99l" event={"ID":"e1a924fe-809e-4cd2-9e0a-1859228aa009","Type":"ContainerDied","Data":"0965514e51c7552edb397523e24c0da2d9a2e5e7117204704ae83e4565ea143d"} Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.725844 5045 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.777919 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f26f45b5-c987-4eba-9593-98007bd1ffe0-secret-volume\") pod \"f26f45b5-c987-4eba-9593-98007bd1ffe0\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.778088 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7hgb\" (UniqueName: \"kubernetes.io/projected/f26f45b5-c987-4eba-9593-98007bd1ffe0-kube-api-access-r7hgb\") pod \"f26f45b5-c987-4eba-9593-98007bd1ffe0\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.778272 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f26f45b5-c987-4eba-9593-98007bd1ffe0-config-volume\") pod \"f26f45b5-c987-4eba-9593-98007bd1ffe0\" (UID: \"f26f45b5-c987-4eba-9593-98007bd1ffe0\") " Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.779205 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f26f45b5-c987-4eba-9593-98007bd1ffe0-config-volume" (OuterVolumeSpecName: "config-volume") pod "f26f45b5-c987-4eba-9593-98007bd1ffe0" (UID: "f26f45b5-c987-4eba-9593-98007bd1ffe0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.788354 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f26f45b5-c987-4eba-9593-98007bd1ffe0-kube-api-access-r7hgb" (OuterVolumeSpecName: "kube-api-access-r7hgb") pod "f26f45b5-c987-4eba-9593-98007bd1ffe0" (UID: "f26f45b5-c987-4eba-9593-98007bd1ffe0"). InnerVolumeSpecName "kube-api-access-r7hgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.798893 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f26f45b5-c987-4eba-9593-98007bd1ffe0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f26f45b5-c987-4eba-9593-98007bd1ffe0" (UID: "f26f45b5-c987-4eba-9593-98007bd1ffe0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.881461 5045 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f26f45b5-c987-4eba-9593-98007bd1ffe0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.881496 5045 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f26f45b5-c987-4eba-9593-98007bd1ffe0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:03 crc kubenswrapper[5045]: I1126 00:00:03.881506 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7hgb\" (UniqueName: \"kubernetes.io/projected/f26f45b5-c987-4eba-9593-98007bd1ffe0-kube-api-access-r7hgb\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.353428 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.357820 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-67qfs" event={"ID":"f26f45b5-c987-4eba-9593-98007bd1ffe0","Type":"ContainerDied","Data":"02a8f00c5ea9c3759bf59409e3c8cb4017a97e235663e9b9972d8b0740d2c5d7"} Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.357862 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02a8f00c5ea9c3759bf59409e3c8cb4017a97e235663e9b9972d8b0740d2c5d7" Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.675294 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.813328 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5txw9\" (UniqueName: \"kubernetes.io/projected/e1a924fe-809e-4cd2-9e0a-1859228aa009-kube-api-access-5txw9\") pod \"e1a924fe-809e-4cd2-9e0a-1859228aa009\" (UID: \"e1a924fe-809e-4cd2-9e0a-1859228aa009\") " Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.813459 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e1a924fe-809e-4cd2-9e0a-1859228aa009-serviceca\") pod \"e1a924fe-809e-4cd2-9e0a-1859228aa009\" (UID: \"e1a924fe-809e-4cd2-9e0a-1859228aa009\") " Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.814765 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1a924fe-809e-4cd2-9e0a-1859228aa009-serviceca" (OuterVolumeSpecName: "serviceca") pod "e1a924fe-809e-4cd2-9e0a-1859228aa009" (UID: "e1a924fe-809e-4cd2-9e0a-1859228aa009"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.826945 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1a924fe-809e-4cd2-9e0a-1859228aa009-kube-api-access-5txw9" (OuterVolumeSpecName: "kube-api-access-5txw9") pod "e1a924fe-809e-4cd2-9e0a-1859228aa009" (UID: "e1a924fe-809e-4cd2-9e0a-1859228aa009"). InnerVolumeSpecName "kube-api-access-5txw9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.828775 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv"] Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.839342 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401875-g5mrv"] Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.916494 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5txw9\" (UniqueName: \"kubernetes.io/projected/e1a924fe-809e-4cd2-9e0a-1859228aa009-kube-api-access-5txw9\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:04 crc kubenswrapper[5045]: I1126 00:00:04.916544 5045 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e1a924fe-809e-4cd2-9e0a-1859228aa009-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:05 crc kubenswrapper[5045]: I1126 00:00:05.362750 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29401920-mp99l" event={"ID":"e1a924fe-809e-4cd2-9e0a-1859228aa009","Type":"ContainerDied","Data":"23257237974c31f6408187cfbd3355e0bb8284a733283edf98a70345e1fc3d97"} Nov 26 00:00:05 crc kubenswrapper[5045]: I1126 00:00:05.363001 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23257237974c31f6408187cfbd3355e0bb8284a733283edf98a70345e1fc3d97" Nov 26 00:00:05 crc kubenswrapper[5045]: I1126 00:00:05.362814 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29401920-mp99l" Nov 26 00:00:06 crc kubenswrapper[5045]: I1126 00:00:06.411851 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a274ea0e-8ce3-46aa-84e7-21bf65cd00ae" path="/var/lib/kubelet/pods/a274ea0e-8ce3-46aa-84e7-21bf65cd00ae/volumes" Nov 26 00:00:08 crc kubenswrapper[5045]: I1126 00:00:08.391862 5045 generic.go:334] "Generic (PLEG): container finished" podID="ce1d9883-706a-4df4-82e8-255c03424e7a" containerID="274ad047fb1de4299daf8df5d9da893556b5d5519095d547b8ca3c0a64b621a8" exitCode=0 Nov 26 00:00:08 crc kubenswrapper[5045]: I1126 00:00:08.391917 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-purge-29401920-5drnw" event={"ID":"ce1d9883-706a-4df4-82e8-255c03424e7a","Type":"ContainerDied","Data":"274ad047fb1de4299daf8df5d9da893556b5d5519095d547b8ca3c0a64b621a8"} Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.404167 5045 generic.go:334] "Generic (PLEG): container finished" podID="dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1" containerID="46840a79b0020edcb223a1abeb36024246d5fc7f9dd94d42067101ba5e3be1f4" exitCode=0 Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.404580 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" event={"ID":"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1","Type":"ContainerDied","Data":"46840a79b0020edcb223a1abeb36024246d5fc7f9dd94d42067101ba5e3be1f4"} Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.794154 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.915543 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-scripts\") pod \"ce1d9883-706a-4df4-82e8-255c03424e7a\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.915893 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmp7z\" (UniqueName: \"kubernetes.io/projected/ce1d9883-706a-4df4-82e8-255c03424e7a-kube-api-access-zmp7z\") pod \"ce1d9883-706a-4df4-82e8-255c03424e7a\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.916071 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-combined-ca-bundle\") pod \"ce1d9883-706a-4df4-82e8-255c03424e7a\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.916099 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-config-data\") pod \"ce1d9883-706a-4df4-82e8-255c03424e7a\" (UID: \"ce1d9883-706a-4df4-82e8-255c03424e7a\") " Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.926815 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-scripts" (OuterVolumeSpecName: "scripts") pod "ce1d9883-706a-4df4-82e8-255c03424e7a" (UID: "ce1d9883-706a-4df4-82e8-255c03424e7a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.934216 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce1d9883-706a-4df4-82e8-255c03424e7a-kube-api-access-zmp7z" (OuterVolumeSpecName: "kube-api-access-zmp7z") pod "ce1d9883-706a-4df4-82e8-255c03424e7a" (UID: "ce1d9883-706a-4df4-82e8-255c03424e7a"). InnerVolumeSpecName "kube-api-access-zmp7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.951803 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-config-data" (OuterVolumeSpecName: "config-data") pod "ce1d9883-706a-4df4-82e8-255c03424e7a" (UID: "ce1d9883-706a-4df4-82e8-255c03424e7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:00:09 crc kubenswrapper[5045]: I1126 00:00:09.968844 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce1d9883-706a-4df4-82e8-255c03424e7a" (UID: "ce1d9883-706a-4df4-82e8-255c03424e7a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.018666 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.018961 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmp7z\" (UniqueName: \"kubernetes.io/projected/ce1d9883-706a-4df4-82e8-255c03424e7a-kube-api-access-zmp7z\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.019040 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.019103 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce1d9883-706a-4df4-82e8-255c03424e7a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.414470 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-purge-29401920-5drnw" event={"ID":"ce1d9883-706a-4df4-82e8-255c03424e7a","Type":"ContainerDied","Data":"e1a550fec889eaa0c828454257b1039028629089ec2401c8c05691481462d7fe"} Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.414529 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1a550fec889eaa0c828454257b1039028629089ec2401c8c05691481462d7fe" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.414485 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-purge-29401920-5drnw" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.748615 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.833181 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-config-data\") pod \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.833357 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-scripts\") pod \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.833397 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-combined-ca-bundle\") pod \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.833514 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqq9w\" (UniqueName: \"kubernetes.io/projected/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-kube-api-access-sqq9w\") pod \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\" (UID: \"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1\") " Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.845969 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-scripts" (OuterVolumeSpecName: "scripts") pod "dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1" (UID: "dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.846028 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-kube-api-access-sqq9w" (OuterVolumeSpecName: "kube-api-access-sqq9w") pod "dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1" (UID: "dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1"). InnerVolumeSpecName "kube-api-access-sqq9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.868530 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-config-data" (OuterVolumeSpecName: "config-data") pod "dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1" (UID: "dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.884855 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1" (UID: "dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.935879 5045 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.935922 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.935935 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqq9w\" (UniqueName: \"kubernetes.io/projected/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-kube-api-access-sqq9w\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:10 crc kubenswrapper[5045]: I1126 00:00:10.935947 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:00:11 crc kubenswrapper[5045]: I1126 00:00:11.424108 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" event={"ID":"dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1","Type":"ContainerDied","Data":"3122f03491ad21f173cbb5b09a00a28ed2bb4cf5f9895f3b403378035e5ca58c"} Nov 26 00:00:11 crc kubenswrapper[5045]: I1126 00:00:11.424336 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3122f03491ad21f173cbb5b09a00a28ed2bb4cf5f9895f3b403378035e5ca58c" Nov 26 00:00:11 crc kubenswrapper[5045]: I1126 00:00:11.424178 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-purge-29401920-qtzfs" Nov 26 00:00:13 crc kubenswrapper[5045]: I1126 00:00:13.397138 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:00:13 crc kubenswrapper[5045]: E1126 00:00:13.397825 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:00:22 crc kubenswrapper[5045]: I1126 00:00:22.446286 5045 scope.go:117] "RemoveContainer" containerID="3472988225de79e28a81de5061a3c36f8b924634fb3d4afa1ab133e1753c8c5d" Nov 26 00:00:24 crc kubenswrapper[5045]: I1126 00:00:24.536034 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/util/0.log" Nov 26 00:00:24 crc kubenswrapper[5045]: I1126 00:00:24.688954 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/util/0.log" Nov 26 00:00:24 crc kubenswrapper[5045]: I1126 00:00:24.715781 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/pull/0.log" Nov 26 00:00:24 crc kubenswrapper[5045]: I1126 00:00:24.716595 5045 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/pull/0.log" Nov 26 00:00:24 crc kubenswrapper[5045]: I1126 00:00:24.911942 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/pull/0.log" Nov 26 00:00:24 crc kubenswrapper[5045]: I1126 00:00:24.921310 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/util/0.log" Nov 26 00:00:24 crc kubenswrapper[5045]: I1126 00:00:24.928425 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/extract/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.115964 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-tkwkz_f1030448-0fd8-42d3-9a83-2e27d87c855e/kube-rbac-proxy/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.116110 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hmwhx_e287d4e5-6925-42eb-a661-fded8259123f/kube-rbac-proxy/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.184178 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hmwhx_e287d4e5-6925-42eb-a661-fded8259123f/manager/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.346103 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-tkwkz_f1030448-0fd8-42d3-9a83-2e27d87c855e/manager/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.380152 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-npq84_344fd1ea-983e-4515-aa8a-479ec0c46c81/kube-rbac-proxy/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.381243 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-npq84_344fd1ea-983e-4515-aa8a-479ec0c46c81/manager/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.541505 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-76f7fdd9bd-84dxm_39fa03d1-e77d-46bd-bc4c-d83960611145/kube-rbac-proxy/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.605234 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-76f7fdd9bd-84dxm_39fa03d1-e77d-46bd-bc4c-d83960611145/manager/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.715955 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-8mpkk_a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b/manager/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.737202 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-8mpkk_a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b/kube-rbac-proxy/0.log" Nov 26 00:00:25 crc 
kubenswrapper[5045]: I1126 00:00:25.802851 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-qzhwg_20abfff9-9e94-466e-a2bc-487a231b86a5/kube-rbac-proxy/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.895237 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-qzhwg_20abfff9-9e94-466e-a2bc-487a231b86a5/manager/0.log" Nov 26 00:00:25 crc kubenswrapper[5045]: I1126 00:00:25.925420 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-dhjmg_cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16/kube-rbac-proxy/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.131132 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-dhjmg_cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16/manager/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.134980 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-mb2mk_2b6f436b-9a87-463e-a7ca-48ae08ba5f10/kube-rbac-proxy/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.153836 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-mb2mk_2b6f436b-9a87-463e-a7ca-48ae08ba5f10/manager/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.312332 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-7rzvl_4c3613c7-39a6-46b5-82da-a461d37d8965/kube-rbac-proxy/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.354724 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-7rzvl_4c3613c7-39a6-46b5-82da-a461d37d8965/manager/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.396992 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:00:26 crc kubenswrapper[5045]: E1126 00:00:26.397511 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.518792 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-q7rbk_289a9811-aa55-449c-aa82-a56f4b1ef53e/kube-rbac-proxy/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.531270 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-q7rbk_289a9811-aa55-449c-aa82-a56f4b1ef53e/manager/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.567752 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-phr8m_485f4bbf-205f-4ea5-8009-a0cdeb204139/kube-rbac-proxy/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.698255 5045 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-m98w9_38cf0eb2-2d59-418b-9e24-b04c72c58c9f/kube-rbac-proxy/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.718022 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-phr8m_485f4bbf-205f-4ea5-8009-a0cdeb204139/manager/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.783524 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-m98w9_38cf0eb2-2d59-418b-9e24-b04c72c58c9f/manager/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.931866 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-tkgqg_4b50dd58-f8a3-4ce4-b008-dd810e1a424d/kube-rbac-proxy/0.log" Nov 26 00:00:26 crc kubenswrapper[5045]: I1126 00:00:26.986552 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-tkgqg_4b50dd58-f8a3-4ce4-b008-dd810e1a424d/manager/0.log" Nov 26 00:00:27 crc kubenswrapper[5045]: I1126 00:00:27.095163 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-g4pvl_bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e/kube-rbac-proxy/0.log" Nov 26 00:00:27 crc kubenswrapper[5045]: I1126 00:00:27.111995 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-g4pvl_bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e/manager/0.log" Nov 26 00:00:27 crc kubenswrapper[5045]: I1126 00:00:27.230410 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r_c5f48852-fbb4-429b-93c8-19121a51be4a/kube-rbac-proxy/0.log" Nov 26 00:00:27 crc kubenswrapper[5045]: I1126 00:00:27.346241 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r_c5f48852-fbb4-429b-93c8-19121a51be4a/manager/0.log" Nov 26 00:00:27 crc kubenswrapper[5045]: I1126 00:00:27.740795 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-bl9nc_59064a60-9836-4862-8b6b-ba68ce13975d/registry-server/0.log" Nov 26 00:00:27 crc kubenswrapper[5045]: I1126 00:00:27.743359 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7d45d649c4-bccwd_fc249ddc-e18a-4677-8e37-7b7d449876d9/operator/0.log" Nov 26 00:00:27 crc kubenswrapper[5045]: I1126 00:00:27.915852 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-rk9dz_35229e25-460b-4c57-9dae-6dceadf19b3f/kube-rbac-proxy/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.145010 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-rk9dz_35229e25-460b-4c57-9dae-6dceadf19b3f/manager/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.500162 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-2pfmh_da63a4ac-64af-4d60-b968-274c9960b665/kube-rbac-proxy/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.515356 5045 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-85bpk_6c15a559-c39e-47a5-83b2-74a6e830c1b2/operator/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.542719 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-2pfmh_da63a4ac-64af-4d60-b968-274c9960b665/manager/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.689420 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-n7s8v_9092d9d6-3e10-4f43-84e7-121153c39104/kube-rbac-proxy/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.690647 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-n7s8v_9092d9d6-3e10-4f43-84e7-121153c39104/manager/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.756769 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-746744c96-rtr9q_aeeca19c-1da3-4bc7-934d-fa4c8663ca04/manager/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.797384 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-jzkxl_39ce2a8b-211e-4bb4-91a3-0999e4f45162/kube-rbac-proxy/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.913253 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-jzkxl_39ce2a8b-211e-4bb4-91a3-0999e4f45162/manager/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.974568 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-vsfg5_74cee554-ae39-4dd6-b932-dc432e32cda0/kube-rbac-proxy/0.log" Nov 26 00:00:28 crc kubenswrapper[5045]: I1126 00:00:28.979006 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-vsfg5_74cee554-ae39-4dd6-b932-dc432e32cda0/manager/0.log" Nov 26 00:00:29 crc kubenswrapper[5045]: I1126 00:00:29.085435 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-r4wcc_eb320624-b1a8-45b6-891f-0b4517a5376e/manager/0.log" Nov 26 00:00:29 crc kubenswrapper[5045]: I1126 00:00:29.091949 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-r4wcc_eb320624-b1a8-45b6-891f-0b4517a5376e/kube-rbac-proxy/0.log" Nov 26 00:00:41 crc kubenswrapper[5045]: I1126 00:00:41.396928 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:00:41 crc kubenswrapper[5045]: E1126 00:00:41.397703 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:00:47 crc kubenswrapper[5045]: I1126 00:00:47.486634 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-955fw_7e777e65-538b-4823-abd9-f6c387f3fba3/control-plane-machine-set-operator/0.log" Nov 26 00:00:47 crc kubenswrapper[5045]: I1126 00:00:47.664675 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tst4h_601df050-5421-4266-bf7c-60096a066a24/kube-rbac-proxy/0.log" Nov 26 00:00:47 crc kubenswrapper[5045]: I1126 00:00:47.711206 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tst4h_601df050-5421-4266-bf7c-60096a066a24/machine-api-operator/0.log" Nov 26 00:00:52 crc kubenswrapper[5045]: I1126 00:00:52.397239 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:00:52 crc kubenswrapper[5045]: E1126 00:00:52.398208 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.146921 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-purge-29401921-gs822"] Nov 26 00:01:00 crc kubenswrapper[5045]: E1126 00:01:00.148002 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1" containerName="nova-manage" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.148023 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1" containerName="nova-manage" Nov 26 00:01:00 crc kubenswrapper[5045]: E1126 00:01:00.148050 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1a924fe-809e-4cd2-9e0a-1859228aa009" containerName="image-pruner" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.148058 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1a924fe-809e-4cd2-9e0a-1859228aa009" containerName="image-pruner" Nov 26 00:01:00 crc kubenswrapper[5045]: E1126 00:01:00.148076 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f26f45b5-c987-4eba-9593-98007bd1ffe0" containerName="collect-profiles" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.148084 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f26f45b5-c987-4eba-9593-98007bd1ffe0" containerName="collect-profiles" Nov 26 00:01:00 crc kubenswrapper[5045]: E1126 00:01:00.148101 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce1d9883-706a-4df4-82e8-255c03424e7a" containerName="nova-manage" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.148109 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce1d9883-706a-4df4-82e8-255c03424e7a" containerName="nova-manage" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.148344 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f26f45b5-c987-4eba-9593-98007bd1ffe0" containerName="collect-profiles" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.148365 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1a924fe-809e-4cd2-9e0a-1859228aa009" containerName="image-pruner" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.148389 5045 
memory_manager.go:354] "RemoveStaleState removing state" podUID="dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1" containerName="nova-manage" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.148408 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce1d9883-706a-4df4-82e8-255c03424e7a" containerName="nova-manage" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.149307 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.154992 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401921-6p59q"] Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.156300 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.156527 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.171580 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-purge-29401921-mw666"] Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.173290 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.207039 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-purge-29401921-6pglr"] Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.208469 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.214076 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-purge-29401921-gs822"] Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.223211 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-purge-29401921-6pglr"] Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.235927 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401921-6p59q"] Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.243818 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-purge-29401921-mw666"] Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.278741 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-combined-ca-bundle\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.278824 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hglkz\" (UniqueName: \"kubernetes.io/projected/376db204-9db9-4576-a300-452841866605-kube-api-access-hglkz\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.278870 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-db-purge-config-data\") pod \"cinder-db-purge-29401921-mw666\" 
(UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.278893 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clfmd\" (UniqueName: \"kubernetes.io/projected/b5251d69-bf33-43f9-ad5f-ea0937a59e10-kube-api-access-clfmd\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.278931 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-db-purge-config-data\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.278967 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-combined-ca-bundle\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.279002 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz4hx\" (UniqueName: \"kubernetes.io/projected/14363206-5b08-472d-8fe1-4950a2378e74-kube-api-access-gz4hx\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.279035 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-fernet-keys\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.279063 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-config-data\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.279158 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-config-data\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.279191 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-config-data\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.279213 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-combined-ca-bundle\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387138 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hglkz\" (UniqueName: \"kubernetes.io/projected/376db204-9db9-4576-a300-452841866605-kube-api-access-hglkz\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387203 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-db-purge-config-data\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387233 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clfmd\" (UniqueName: \"kubernetes.io/projected/b5251d69-bf33-43f9-ad5f-ea0937a59e10-kube-api-access-clfmd\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387265 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtw6x\" (UniqueName: \"kubernetes.io/projected/cef47d90-76d7-4f52-8583-94f0decfd788-kube-api-access-mtw6x\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387306 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-db-purge-config-data\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387347 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-combined-ca-bundle\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387375 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-combined-ca-bundle\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387413 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz4hx\" (UniqueName: \"kubernetes.io/projected/14363206-5b08-472d-8fe1-4950a2378e74-kube-api-access-gz4hx\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: 
I1126 00:01:00.387448 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-fernet-keys\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387473 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-config-data\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387534 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-db-purge-config-data\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387596 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-config-data\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387637 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-config-data\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387662 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-combined-ca-bundle\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.387704 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-combined-ca-bundle\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.408073 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-combined-ca-bundle\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.411627 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-combined-ca-bundle\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.413567 5045 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-combined-ca-bundle\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.417384 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hglkz\" (UniqueName: \"kubernetes.io/projected/376db204-9db9-4576-a300-452841866605-kube-api-access-hglkz\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.417889 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-config-data\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.419434 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-db-purge-config-data\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.419481 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-db-purge-config-data\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.423381 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-fernet-keys\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.426910 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-config-data\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.428460 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz4hx\" (UniqueName: \"kubernetes.io/projected/14363206-5b08-472d-8fe1-4950a2378e74-kube-api-access-gz4hx\") pod \"glance-db-purge-29401921-gs822\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.435894 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-config-data\") pod \"cinder-db-purge-29401921-mw666\" (UID: \"376db204-9db9-4576-a300-452841866605\") " pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.443531 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-clfmd\" (UniqueName: \"kubernetes.io/projected/b5251d69-bf33-43f9-ad5f-ea0937a59e10-kube-api-access-clfmd\") pod \"keystone-cron-29401921-6p59q\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.475833 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.489417 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-db-purge-config-data\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.489578 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtw6x\" (UniqueName: \"kubernetes.io/projected/cef47d90-76d7-4f52-8583-94f0decfd788-kube-api-access-mtw6x\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.489617 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-combined-ca-bundle\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.493796 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-db-purge-config-data\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.494697 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-combined-ca-bundle\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.496424 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.506555 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtw6x\" (UniqueName: \"kubernetes.io/projected/cef47d90-76d7-4f52-8583-94f0decfd788-kube-api-access-mtw6x\") pod \"manila-db-purge-29401921-6pglr\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.513681 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.547022 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.617320 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-8rvmp_581be194-4f18-4d0b-82fe-da014d72e03d/cert-manager-controller/0.log" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.864003 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-lpn7b_9f002a66-6d5b-49a1-881d-5ed4deb1a006/cert-manager-webhook/0.log" Nov 26 00:01:00 crc kubenswrapper[5045]: I1126 00:01:00.869166 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-tj5h4_ca7e1b0d-b147-46cb-9537-6026becd4866/cert-manager-cainjector/0.log" Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.076086 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-purge-29401921-mw666"] Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.086510 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-purge-29401921-gs822"] Nov 26 00:01:01 crc kubenswrapper[5045]: W1126 00:01:01.098883 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14363206_5b08_472d_8fe1_4950a2378e74.slice/crio-009902ab96de6ac990ed52b3fdc5e9b39a3f4e9dc828698a7acc93b5af71bdd6 WatchSource:0}: Error finding container 009902ab96de6ac990ed52b3fdc5e9b39a3f4e9dc828698a7acc93b5af71bdd6: Status 404 returned error can't find the container with id 009902ab96de6ac990ed52b3fdc5e9b39a3f4e9dc828698a7acc93b5af71bdd6 Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.188974 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401921-6p59q"] Nov 26 00:01:01 crc kubenswrapper[5045]: W1126 00:01:01.202501 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5251d69_bf33_43f9_ad5f_ea0937a59e10.slice/crio-3fbd2f8d9554e60321c0c231b56caec0aea173c5e43f2fb6dffe155cc1d721a5 WatchSource:0}: Error finding container 3fbd2f8d9554e60321c0c231b56caec0aea173c5e43f2fb6dffe155cc1d721a5: Status 404 returned error can't find the container with id 3fbd2f8d9554e60321c0c231b56caec0aea173c5e43f2fb6dffe155cc1d721a5 Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.290470 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-purge-29401921-6pglr"] Nov 26 00:01:01 crc kubenswrapper[5045]: W1126 00:01:01.297911 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcef47d90_76d7_4f52_8583_94f0decfd788.slice/crio-a38f9a1db6a92b1254589f77b5c5835d54fe08372e6d27313985f060ee186dcb WatchSource:0}: Error finding container a38f9a1db6a92b1254589f77b5c5835d54fe08372e6d27313985f060ee186dcb: Status 404 returned error can't find the container with id a38f9a1db6a92b1254589f77b5c5835d54fe08372e6d27313985f060ee186dcb Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.892593 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401921-6p59q" event={"ID":"b5251d69-bf33-43f9-ad5f-ea0937a59e10","Type":"ContainerStarted","Data":"0ae674dece414f6d7ef372598c88f749322aa3fcdadab35f6fbca2d04e5c570f"} Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.892978 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-cron-29401921-6p59q" event={"ID":"b5251d69-bf33-43f9-ad5f-ea0937a59e10","Type":"ContainerStarted","Data":"3fbd2f8d9554e60321c0c231b56caec0aea173c5e43f2fb6dffe155cc1d721a5"} Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.895730 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-purge-29401921-mw666" event={"ID":"376db204-9db9-4576-a300-452841866605","Type":"ContainerStarted","Data":"e8b154357d448f95ba8e35b2a24150f952f93c450bd79cee6dbfb92356123f59"} Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.897422 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-purge-29401921-gs822" event={"ID":"14363206-5b08-472d-8fe1-4950a2378e74","Type":"ContainerStarted","Data":"2ae14397b615a60243cc0a17afa8987dab8a831603cd7e219cc2b5f4f9cbfbee"} Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.897459 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-purge-29401921-gs822" event={"ID":"14363206-5b08-472d-8fe1-4950a2378e74","Type":"ContainerStarted","Data":"009902ab96de6ac990ed52b3fdc5e9b39a3f4e9dc828698a7acc93b5af71bdd6"} Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.898939 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-purge-29401921-6pglr" event={"ID":"cef47d90-76d7-4f52-8583-94f0decfd788","Type":"ContainerStarted","Data":"1e167d3a5953786ecddf2625c88257add120bc9715c96992a86cf23c35de9ba0"} Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.898981 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-purge-29401921-6pglr" event={"ID":"cef47d90-76d7-4f52-8583-94f0decfd788","Type":"ContainerStarted","Data":"a38f9a1db6a92b1254589f77b5c5835d54fe08372e6d27313985f060ee186dcb"} Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.911145 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401921-6p59q" podStartSLOduration=1.9111315370000002 podStartE2EDuration="1.911131537s" podCreationTimestamp="2025-11-26 00:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:01:01.90555116 +0000 UTC m=+3718.263210272" watchObservedRunningTime="2025-11-26 00:01:01.911131537 +0000 UTC m=+3718.268790649" Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.931817 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-purge-29401921-6pglr" podStartSLOduration=1.931800579 podStartE2EDuration="1.931800579s" podCreationTimestamp="2025-11-26 00:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:01:01.920885912 +0000 UTC m=+3718.278545024" watchObservedRunningTime="2025-11-26 00:01:01.931800579 +0000 UTC m=+3718.289459691" Nov 26 00:01:01 crc kubenswrapper[5045]: I1126 00:01:01.943420 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-purge-29401921-gs822" podStartSLOduration=1.943404516 podStartE2EDuration="1.943404516s" podCreationTimestamp="2025-11-26 00:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:01:01.936248864 +0000 UTC m=+3718.293907966" watchObservedRunningTime="2025-11-26 00:01:01.943404516 +0000 UTC m=+3718.301063628" Nov 26 00:01:02 crc kubenswrapper[5045]: I1126 
00:01:02.911684 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-purge-29401921-mw666" event={"ID":"376db204-9db9-4576-a300-452841866605","Type":"ContainerStarted","Data":"d40ae5e85fbd39ff8b579c8fc5c46156085a8f0fac173a208d2ec6a63de6b55f"} Nov 26 00:01:02 crc kubenswrapper[5045]: I1126 00:01:02.935261 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-purge-29401921-mw666" podStartSLOduration=2.935239443 podStartE2EDuration="2.935239443s" podCreationTimestamp="2025-11-26 00:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:01:02.934377529 +0000 UTC m=+3719.292036651" watchObservedRunningTime="2025-11-26 00:01:02.935239443 +0000 UTC m=+3719.292898555" Nov 26 00:01:03 crc kubenswrapper[5045]: I1126 00:01:03.935155 5045 generic.go:334] "Generic (PLEG): container finished" podID="cef47d90-76d7-4f52-8583-94f0decfd788" containerID="1e167d3a5953786ecddf2625c88257add120bc9715c96992a86cf23c35de9ba0" exitCode=0 Nov 26 00:01:03 crc kubenswrapper[5045]: I1126 00:01:03.935241 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-purge-29401921-6pglr" event={"ID":"cef47d90-76d7-4f52-8583-94f0decfd788","Type":"ContainerDied","Data":"1e167d3a5953786ecddf2625c88257add120bc9715c96992a86cf23c35de9ba0"} Nov 26 00:01:03 crc kubenswrapper[5045]: I1126 00:01:03.944005 5045 generic.go:334] "Generic (PLEG): container finished" podID="14363206-5b08-472d-8fe1-4950a2378e74" containerID="2ae14397b615a60243cc0a17afa8987dab8a831603cd7e219cc2b5f4f9cbfbee" exitCode=0 Nov 26 00:01:03 crc kubenswrapper[5045]: I1126 00:01:03.944883 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-purge-29401921-gs822" event={"ID":"14363206-5b08-472d-8fe1-4950a2378e74","Type":"ContainerDied","Data":"2ae14397b615a60243cc0a17afa8987dab8a831603cd7e219cc2b5f4f9cbfbee"} Nov 26 00:01:04 crc kubenswrapper[5045]: I1126 00:01:04.954919 5045 generic.go:334] "Generic (PLEG): container finished" podID="b5251d69-bf33-43f9-ad5f-ea0937a59e10" containerID="0ae674dece414f6d7ef372598c88f749322aa3fcdadab35f6fbca2d04e5c570f" exitCode=0 Nov 26 00:01:04 crc kubenswrapper[5045]: I1126 00:01:04.955015 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401921-6p59q" event={"ID":"b5251d69-bf33-43f9-ad5f-ea0937a59e10","Type":"ContainerDied","Data":"0ae674dece414f6d7ef372598c88f749322aa3fcdadab35f6fbca2d04e5c570f"} Nov 26 00:01:04 crc kubenswrapper[5045]: I1126 00:01:04.958773 5045 generic.go:334] "Generic (PLEG): container finished" podID="376db204-9db9-4576-a300-452841866605" containerID="d40ae5e85fbd39ff8b579c8fc5c46156085a8f0fac173a208d2ec6a63de6b55f" exitCode=0 Nov 26 00:01:04 crc kubenswrapper[5045]: I1126 00:01:04.958842 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-purge-29401921-mw666" event={"ID":"376db204-9db9-4576-a300-452841866605","Type":"ContainerDied","Data":"d40ae5e85fbd39ff8b579c8fc5c46156085a8f0fac173a208d2ec6a63de6b55f"} Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.303660 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.389097 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-combined-ca-bundle\") pod \"14363206-5b08-472d-8fe1-4950a2378e74\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.389480 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz4hx\" (UniqueName: \"kubernetes.io/projected/14363206-5b08-472d-8fe1-4950a2378e74-kube-api-access-gz4hx\") pod \"14363206-5b08-472d-8fe1-4950a2378e74\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.389559 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-config-data\") pod \"14363206-5b08-472d-8fe1-4950a2378e74\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.389639 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-db-purge-config-data\") pod \"14363206-5b08-472d-8fe1-4950a2378e74\" (UID: \"14363206-5b08-472d-8fe1-4950a2378e74\") " Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.394877 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-db-purge-config-data" (OuterVolumeSpecName: "db-purge-config-data") pod "14363206-5b08-472d-8fe1-4950a2378e74" (UID: "14363206-5b08-472d-8fe1-4950a2378e74"). InnerVolumeSpecName "db-purge-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.395421 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14363206-5b08-472d-8fe1-4950a2378e74-kube-api-access-gz4hx" (OuterVolumeSpecName: "kube-api-access-gz4hx") pod "14363206-5b08-472d-8fe1-4950a2378e74" (UID: "14363206-5b08-472d-8fe1-4950a2378e74"). InnerVolumeSpecName "kube-api-access-gz4hx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.419982 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "14363206-5b08-472d-8fe1-4950a2378e74" (UID: "14363206-5b08-472d-8fe1-4950a2378e74"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.430244 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-config-data" (OuterVolumeSpecName: "config-data") pod "14363206-5b08-472d-8fe1-4950a2378e74" (UID: "14363206-5b08-472d-8fe1-4950a2378e74"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.492477 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.492514 5045 reconciler_common.go:293] "Volume detached for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-db-purge-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.492523 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14363206-5b08-472d-8fe1-4950a2378e74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.492532 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz4hx\" (UniqueName: \"kubernetes.io/projected/14363206-5b08-472d-8fe1-4950a2378e74-kube-api-access-gz4hx\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.500608 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.593543 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-combined-ca-bundle\") pod \"cef47d90-76d7-4f52-8583-94f0decfd788\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.593745 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-db-purge-config-data\") pod \"cef47d90-76d7-4f52-8583-94f0decfd788\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.593775 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtw6x\" (UniqueName: \"kubernetes.io/projected/cef47d90-76d7-4f52-8583-94f0decfd788-kube-api-access-mtw6x\") pod \"cef47d90-76d7-4f52-8583-94f0decfd788\" (UID: \"cef47d90-76d7-4f52-8583-94f0decfd788\") " Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.596740 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cef47d90-76d7-4f52-8583-94f0decfd788-kube-api-access-mtw6x" (OuterVolumeSpecName: "kube-api-access-mtw6x") pod "cef47d90-76d7-4f52-8583-94f0decfd788" (UID: "cef47d90-76d7-4f52-8583-94f0decfd788"). InnerVolumeSpecName "kube-api-access-mtw6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.597064 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-db-purge-config-data" (OuterVolumeSpecName: "db-purge-config-data") pod "cef47d90-76d7-4f52-8583-94f0decfd788" (UID: "cef47d90-76d7-4f52-8583-94f0decfd788"). InnerVolumeSpecName "db-purge-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.617969 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cef47d90-76d7-4f52-8583-94f0decfd788" (UID: "cef47d90-76d7-4f52-8583-94f0decfd788"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.696471 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.696693 5045 reconciler_common.go:293] "Volume detached for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/cef47d90-76d7-4f52-8583-94f0decfd788-db-purge-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.696848 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtw6x\" (UniqueName: \"kubernetes.io/projected/cef47d90-76d7-4f52-8583-94f0decfd788-kube-api-access-mtw6x\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.973515 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-purge-29401921-6pglr" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.973511 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-purge-29401921-6pglr" event={"ID":"cef47d90-76d7-4f52-8583-94f0decfd788","Type":"ContainerDied","Data":"a38f9a1db6a92b1254589f77b5c5835d54fe08372e6d27313985f060ee186dcb"} Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.973596 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a38f9a1db6a92b1254589f77b5c5835d54fe08372e6d27313985f060ee186dcb" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.979399 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-purge-29401921-gs822" Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.979520 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-purge-29401921-gs822" event={"ID":"14363206-5b08-472d-8fe1-4950a2378e74","Type":"ContainerDied","Data":"009902ab96de6ac990ed52b3fdc5e9b39a3f4e9dc828698a7acc93b5af71bdd6"} Nov 26 00:01:05 crc kubenswrapper[5045]: I1126 00:01:05.979584 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="009902ab96de6ac990ed52b3fdc5e9b39a3f4e9dc828698a7acc93b5af71bdd6" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.395128 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.398837 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:01:06 crc kubenswrapper[5045]: E1126 00:01:06.399312 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.408243 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.511671 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-config-data\") pod \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.511823 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hglkz\" (UniqueName: \"kubernetes.io/projected/376db204-9db9-4576-a300-452841866605-kube-api-access-hglkz\") pod \"376db204-9db9-4576-a300-452841866605\" (UID: \"376db204-9db9-4576-a300-452841866605\") " Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.512012 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-fernet-keys\") pod \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.512118 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-combined-ca-bundle\") pod \"376db204-9db9-4576-a300-452841866605\" (UID: \"376db204-9db9-4576-a300-452841866605\") " Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.512175 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-db-purge-config-data\") pod \"376db204-9db9-4576-a300-452841866605\" (UID: \"376db204-9db9-4576-a300-452841866605\") " Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.512234 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clfmd\" (UniqueName: \"kubernetes.io/projected/b5251d69-bf33-43f9-ad5f-ea0937a59e10-kube-api-access-clfmd\") pod \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.512269 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-config-data\") pod \"376db204-9db9-4576-a300-452841866605\" (UID: \"376db204-9db9-4576-a300-452841866605\") " Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.512316 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-combined-ca-bundle\") pod \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\" (UID: \"b5251d69-bf33-43f9-ad5f-ea0937a59e10\") " Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.516900 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5251d69-bf33-43f9-ad5f-ea0937a59e10-kube-api-access-clfmd" (OuterVolumeSpecName: "kube-api-access-clfmd") pod "b5251d69-bf33-43f9-ad5f-ea0937a59e10" (UID: "b5251d69-bf33-43f9-ad5f-ea0937a59e10"). InnerVolumeSpecName "kube-api-access-clfmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.518010 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/376db204-9db9-4576-a300-452841866605-kube-api-access-hglkz" (OuterVolumeSpecName: "kube-api-access-hglkz") pod "376db204-9db9-4576-a300-452841866605" (UID: "376db204-9db9-4576-a300-452841866605"). InnerVolumeSpecName "kube-api-access-hglkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.519256 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-db-purge-config-data" (OuterVolumeSpecName: "db-purge-config-data") pod "376db204-9db9-4576-a300-452841866605" (UID: "376db204-9db9-4576-a300-452841866605"). InnerVolumeSpecName "db-purge-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.529991 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b5251d69-bf33-43f9-ad5f-ea0937a59e10" (UID: "b5251d69-bf33-43f9-ad5f-ea0937a59e10"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.567901 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5251d69-bf33-43f9-ad5f-ea0937a59e10" (UID: "b5251d69-bf33-43f9-ad5f-ea0937a59e10"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.567934 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-config-data" (OuterVolumeSpecName: "config-data") pod "376db204-9db9-4576-a300-452841866605" (UID: "376db204-9db9-4576-a300-452841866605"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.572167 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "376db204-9db9-4576-a300-452841866605" (UID: "376db204-9db9-4576-a300-452841866605"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.611315 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-config-data" (OuterVolumeSpecName: "config-data") pod "b5251d69-bf33-43f9-ad5f-ea0937a59e10" (UID: "b5251d69-bf33-43f9-ad5f-ea0937a59e10"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.614751 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clfmd\" (UniqueName: \"kubernetes.io/projected/b5251d69-bf33-43f9-ad5f-ea0937a59e10-kube-api-access-clfmd\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.614830 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.614884 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.615033 5045 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.615107 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hglkz\" (UniqueName: \"kubernetes.io/projected/376db204-9db9-4576-a300-452841866605-kube-api-access-hglkz\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.615448 5045 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5251d69-bf33-43f9-ad5f-ea0937a59e10-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.615523 5045 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.615586 5045 reconciler_common.go:293] "Volume detached for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/376db204-9db9-4576-a300-452841866605-db-purge-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.996252 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401921-6p59q" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.996246 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401921-6p59q" event={"ID":"b5251d69-bf33-43f9-ad5f-ea0937a59e10","Type":"ContainerDied","Data":"3fbd2f8d9554e60321c0c231b56caec0aea173c5e43f2fb6dffe155cc1d721a5"} Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.997878 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3fbd2f8d9554e60321c0c231b56caec0aea173c5e43f2fb6dffe155cc1d721a5" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.998697 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-purge-29401921-mw666" event={"ID":"376db204-9db9-4576-a300-452841866605","Type":"ContainerDied","Data":"e8b154357d448f95ba8e35b2a24150f952f93c450bd79cee6dbfb92356123f59"} Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.998765 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8b154357d448f95ba8e35b2a24150f952f93c450bd79cee6dbfb92356123f59" Nov 26 00:01:06 crc kubenswrapper[5045]: I1126 00:01:06.998834 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-purge-29401921-mw666" Nov 26 00:01:14 crc kubenswrapper[5045]: I1126 00:01:14.158681 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-srctj_6d561240-e484-4a96-aff1-aef1a5c56daf/nmstate-console-plugin/0.log" Nov 26 00:01:14 crc kubenswrapper[5045]: I1126 00:01:14.262651 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-gmpgg_523926ce-7459-44b3-bc6b-03782619bc1e/nmstate-handler/0.log" Nov 26 00:01:14 crc kubenswrapper[5045]: I1126 00:01:14.316287 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-2fvh8_10586198-1de3-4da4-9ba1-b79a9785da2f/kube-rbac-proxy/0.log" Nov 26 00:01:14 crc kubenswrapper[5045]: I1126 00:01:14.331748 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-2fvh8_10586198-1de3-4da4-9ba1-b79a9785da2f/nmstate-metrics/0.log" Nov 26 00:01:14 crc kubenswrapper[5045]: I1126 00:01:14.510839 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-cn5kf_64863e5b-fa79-4a6f-af83-631dafa8a1c4/nmstate-operator/0.log" Nov 26 00:01:14 crc kubenswrapper[5045]: I1126 00:01:14.529678 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-6vs48_8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73/nmstate-webhook/0.log" Nov 26 00:01:20 crc kubenswrapper[5045]: I1126 00:01:20.397307 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:01:20 crc kubenswrapper[5045]: E1126 00:01:20.397932 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:01:23 crc kubenswrapper[5045]: I1126 00:01:23.060763 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/manila-db-create-2t5tj"] Nov 26 00:01:23 crc kubenswrapper[5045]: I1126 00:01:23.074319 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-2t5tj"] Nov 26 00:01:24 crc kubenswrapper[5045]: I1126 00:01:24.448445 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c091d493-4263-41da-a276-6dc859d7d5e1" path="/var/lib/kubelet/pods/c091d493-4263-41da-a276-6dc859d7d5e1/volumes" Nov 26 00:01:25 crc kubenswrapper[5045]: I1126 00:01:25.036645 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-2c9f-account-create-update-zpp6n"] Nov 26 00:01:25 crc kubenswrapper[5045]: I1126 00:01:25.045104 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-2c9f-account-create-update-zpp6n"] Nov 26 00:01:26 crc kubenswrapper[5045]: I1126 00:01:26.413623 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="016700b4-9818-4d96-be4d-d6b07316b91f" path="/var/lib/kubelet/pods/016700b4-9818-4d96-be4d-d6b07316b91f/volumes" Nov 26 00:01:28 crc kubenswrapper[5045]: I1126 00:01:28.627138 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-4g95c_e22db1cb-76eb-4541-8098-95b688ccbe00/kube-rbac-proxy/0.log" Nov 26 00:01:28 crc kubenswrapper[5045]: I1126 00:01:28.762113 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-4g95c_e22db1cb-76eb-4541-8098-95b688ccbe00/controller/0.log" Nov 26 00:01:28 crc kubenswrapper[5045]: I1126 00:01:28.831449 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-frr-files/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.005746 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-frr-files/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.048411 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-metrics/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.072472 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-reloader/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.077418 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-reloader/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.206818 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-frr-files/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.263982 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-reloader/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.325616 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-metrics/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.374441 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-metrics/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.481274 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-reloader/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.493609 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-metrics/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.503172 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-frr-files/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.590024 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/controller/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.681089 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/kube-rbac-proxy/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.756359 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/frr-metrics/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.808289 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/kube-rbac-proxy-frr/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.863255 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/reloader/0.log" Nov 26 00:01:29 crc kubenswrapper[5045]: I1126 00:01:29.996530 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-5l6nm_9c4acc23-28a7-432a-8a17-63550727f1a6/frr-k8s-webhook-server/0.log" Nov 26 00:01:30 crc kubenswrapper[5045]: I1126 00:01:30.230054 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-796ffbd7cd-282k4_211004af-dcb4-4397-bced-fd0c3e3da2a3/manager/0.log" Nov 26 00:01:30 crc kubenswrapper[5045]: I1126 00:01:30.316670 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7cfb757c46-6g84d_aa1a672e-f330-4cef-bf0f-c471b30ac61d/webhook-server/0.log" Nov 26 00:01:30 crc kubenswrapper[5045]: I1126 00:01:30.445738 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sbr7m_fcd4cd5e-d7b9-4666-8eac-781cee36189a/kube-rbac-proxy/0.log" Nov 26 00:01:31 crc kubenswrapper[5045]: I1126 00:01:31.005852 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sbr7m_fcd4cd5e-d7b9-4666-8eac-781cee36189a/speaker/0.log" Nov 26 00:01:31 crc kubenswrapper[5045]: I1126 00:01:31.043890 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/frr/0.log" Nov 26 00:01:33 crc kubenswrapper[5045]: I1126 00:01:33.396939 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:01:33 crc kubenswrapper[5045]: E1126 00:01:33.397467 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:01:41 crc kubenswrapper[5045]: I1126 00:01:41.960016 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/util/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.164580 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/pull/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.170156 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/util/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.181097 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/pull/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.342186 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/util/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.342623 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/pull/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.344611 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/extract/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.549167 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-utilities/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.659069 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-utilities/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.745118 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-content/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.748841 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-content/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.891289 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-utilities/0.log" Nov 26 00:01:42 crc kubenswrapper[5045]: I1126 00:01:42.930614 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-content/0.log" Nov 26 00:01:43 crc kubenswrapper[5045]: I1126 00:01:43.162381 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-utilities/0.log" Nov 26 00:01:43 crc kubenswrapper[5045]: I1126 00:01:43.420292 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-content/0.log" Nov 26 00:01:43 crc kubenswrapper[5045]: I1126 00:01:43.421750 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-content/0.log" Nov 26 00:01:43 crc kubenswrapper[5045]: I1126 00:01:43.430726 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/registry-server/0.log" Nov 26 00:01:43 crc kubenswrapper[5045]: I1126 00:01:43.441239 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-utilities/0.log" Nov 26 00:01:43 crc kubenswrapper[5045]: I1126 00:01:43.682040 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-content/0.log" Nov 26 00:01:43 crc kubenswrapper[5045]: I1126 00:01:43.692816 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-utilities/0.log" Nov 26 00:01:43 crc kubenswrapper[5045]: I1126 00:01:43.956133 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/util/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.084779 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/util/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.124689 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/registry-server/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.139231 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/pull/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.144955 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/pull/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.307518 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/pull/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.314272 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/util/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.315635 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/extract/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.527002 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-f5g5t_292cc94b-5ed6-4491-8168-1ac68858f418/marketplace-operator/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.536016 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-utilities/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.710799 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-utilities/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.712229 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-content/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.731105 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-content/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.921195 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-content/0.log" Nov 26 00:01:44 crc kubenswrapper[5045]: I1126 00:01:44.932170 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-utilities/0.log" Nov 26 00:01:45 crc kubenswrapper[5045]: I1126 00:01:45.081808 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-utilities/0.log" Nov 26 00:01:45 crc kubenswrapper[5045]: I1126 00:01:45.088636 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/registry-server/0.log" Nov 26 00:01:45 crc kubenswrapper[5045]: I1126 00:01:45.251481 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-utilities/0.log" Nov 26 00:01:45 crc kubenswrapper[5045]: I1126 00:01:45.302525 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-content/0.log" Nov 26 00:01:45 crc kubenswrapper[5045]: I1126 00:01:45.307981 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-content/0.log" Nov 26 00:01:45 crc kubenswrapper[5045]: I1126 00:01:45.495127 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-content/0.log" Nov 26 00:01:45 crc kubenswrapper[5045]: I1126 00:01:45.508292 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-utilities/0.log" Nov 26 00:01:46 crc kubenswrapper[5045]: I1126 00:01:46.042165 5045 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/registry-server/0.log" Nov 26 00:01:47 crc kubenswrapper[5045]: I1126 00:01:47.396801 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:01:47 crc kubenswrapper[5045]: E1126 00:01:47.397282 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:01:58 crc kubenswrapper[5045]: I1126 00:01:58.396307 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:01:58 crc kubenswrapper[5045]: E1126 00:01:58.397251 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:02:01 crc kubenswrapper[5045]: I1126 00:02:01.044211 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-fjsxq"] Nov 26 00:02:01 crc kubenswrapper[5045]: I1126 00:02:01.052353 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-fjsxq"] Nov 26 00:02:02 crc kubenswrapper[5045]: I1126 00:02:02.407009 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b764d3f-bf9c-4407-b5d5-6f2834714d50" path="/var/lib/kubelet/pods/8b764d3f-bf9c-4407-b5d5-6f2834714d50/volumes" Nov 26 00:02:09 crc kubenswrapper[5045]: I1126 00:02:09.396673 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:02:09 crc kubenswrapper[5045]: E1126 00:02:09.407925 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:02:22 crc kubenswrapper[5045]: I1126 00:02:22.578705 5045 scope.go:117] "RemoveContainer" containerID="ac795a1accd2529668995e4335212aa5d254db73b433a31d8ebc2c54bb3053ab" Nov 26 00:02:22 crc kubenswrapper[5045]: I1126 00:02:22.603571 5045 scope.go:117] "RemoveContainer" containerID="89a043454ac16a34ec97c43db45bb9a9c3ab1b92053b1c560fbb4cd1f9855a7d" Nov 26 00:02:22 crc kubenswrapper[5045]: I1126 00:02:22.676122 5045 scope.go:117] "RemoveContainer" containerID="57a08480fcd1cae4536ecd417d818938b46a68f8d998252108e14864baac41bb" Nov 26 00:02:24 crc kubenswrapper[5045]: I1126 00:02:24.416073 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:02:24 crc kubenswrapper[5045]: E1126 00:02:24.416594 5045 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:02:38 crc kubenswrapper[5045]: I1126 00:02:38.397448 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:02:38 crc kubenswrapper[5045]: E1126 00:02:38.398273 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:02:52 crc kubenswrapper[5045]: I1126 00:02:52.396680 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:02:52 crc kubenswrapper[5045]: E1126 00:02:52.397650 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:03:04 crc kubenswrapper[5045]: I1126 00:03:04.406442 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:03:04 crc kubenswrapper[5045]: E1126 00:03:04.408437 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:03:17 crc kubenswrapper[5045]: I1126 00:03:17.396892 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:03:17 crc kubenswrapper[5045]: E1126 00:03:17.397896 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:03:28 crc kubenswrapper[5045]: I1126 00:03:28.397802 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:03:28 crc kubenswrapper[5045]: E1126 00:03:28.398843 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.412515 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-56jl6"] Nov 26 00:03:31 crc kubenswrapper[5045]: E1126 00:03:31.413543 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="376db204-9db9-4576-a300-452841866605" containerName="cinder-db-purge" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.413555 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="376db204-9db9-4576-a300-452841866605" containerName="cinder-db-purge" Nov 26 00:03:31 crc kubenswrapper[5045]: E1126 00:03:31.413579 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5251d69-bf33-43f9-ad5f-ea0937a59e10" containerName="keystone-cron" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.413585 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5251d69-bf33-43f9-ad5f-ea0937a59e10" containerName="keystone-cron" Nov 26 00:03:31 crc kubenswrapper[5045]: E1126 00:03:31.413609 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14363206-5b08-472d-8fe1-4950a2378e74" containerName="glance-dbpurge" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.413614 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="14363206-5b08-472d-8fe1-4950a2378e74" containerName="glance-dbpurge" Nov 26 00:03:31 crc kubenswrapper[5045]: E1126 00:03:31.413625 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cef47d90-76d7-4f52-8583-94f0decfd788" containerName="manila-db-purge" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.413631 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="cef47d90-76d7-4f52-8583-94f0decfd788" containerName="manila-db-purge" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.413817 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5251d69-bf33-43f9-ad5f-ea0937a59e10" containerName="keystone-cron" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.413837 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="376db204-9db9-4576-a300-452841866605" containerName="cinder-db-purge" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.413847 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="14363206-5b08-472d-8fe1-4950a2378e74" containerName="glance-dbpurge" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.413858 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="cef47d90-76d7-4f52-8583-94f0decfd788" containerName="manila-db-purge" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.415122 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.425965 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-56jl6"] Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.466691 5045 generic.go:334] "Generic (PLEG): container finished" podID="c1093fd7-5afc-4afd-be60-04a0885dbf62" containerID="e47dbfc15ad8633b5e0c4928a13b4c336077836c25f1206697d496fbebcdc017" exitCode=0 Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.466980 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" event={"ID":"c1093fd7-5afc-4afd-be60-04a0885dbf62","Type":"ContainerDied","Data":"e47dbfc15ad8633b5e0c4928a13b4c336077836c25f1206697d496fbebcdc017"} Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.468006 5045 scope.go:117] "RemoveContainer" containerID="e47dbfc15ad8633b5e0c4928a13b4c336077836c25f1206697d496fbebcdc017" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.605391 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-utilities\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.605459 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwd2z\" (UniqueName: \"kubernetes.io/projected/a9bef72a-aee1-42c0-b740-2ec9df18b70e-kube-api-access-hwd2z\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.606246 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-catalog-content\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.707830 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-utilities\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.707875 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwd2z\" (UniqueName: \"kubernetes.io/projected/a9bef72a-aee1-42c0-b740-2ec9df18b70e-kube-api-access-hwd2z\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.707920 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-catalog-content\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.708438 5045 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-utilities\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.708475 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-catalog-content\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.730789 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwd2z\" (UniqueName: \"kubernetes.io/projected/a9bef72a-aee1-42c0-b740-2ec9df18b70e-kube-api-access-hwd2z\") pod \"community-operators-56jl6\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.738650 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:31 crc kubenswrapper[5045]: I1126 00:03:31.793749 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ntlcp_must-gather-6cj2k_c1093fd7-5afc-4afd-be60-04a0885dbf62/gather/0.log" Nov 26 00:03:32 crc kubenswrapper[5045]: I1126 00:03:32.325281 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-56jl6"] Nov 26 00:03:32 crc kubenswrapper[5045]: I1126 00:03:32.476935 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56jl6" event={"ID":"a9bef72a-aee1-42c0-b740-2ec9df18b70e","Type":"ContainerStarted","Data":"88d60177be12fa2ffdcdb29670c7a8dc1c84da94db93a9bc1bfdf9f74cf3d2fd"} Nov 26 00:03:33 crc kubenswrapper[5045]: I1126 00:03:33.489351 5045 generic.go:334] "Generic (PLEG): container finished" podID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerID="54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25" exitCode=0 Nov 26 00:03:33 crc kubenswrapper[5045]: I1126 00:03:33.489813 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56jl6" event={"ID":"a9bef72a-aee1-42c0-b740-2ec9df18b70e","Type":"ContainerDied","Data":"54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25"} Nov 26 00:03:33 crc kubenswrapper[5045]: I1126 00:03:33.492886 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 00:03:35 crc kubenswrapper[5045]: I1126 00:03:35.514776 5045 generic.go:334] "Generic (PLEG): container finished" podID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerID="c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677" exitCode=0 Nov 26 00:03:35 crc kubenswrapper[5045]: I1126 00:03:35.514902 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56jl6" event={"ID":"a9bef72a-aee1-42c0-b740-2ec9df18b70e","Type":"ContainerDied","Data":"c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677"} Nov 26 00:03:36 crc kubenswrapper[5045]: I1126 00:03:36.526466 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56jl6" 
event={"ID":"a9bef72a-aee1-42c0-b740-2ec9df18b70e","Type":"ContainerStarted","Data":"a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919"} Nov 26 00:03:36 crc kubenswrapper[5045]: I1126 00:03:36.550521 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-56jl6" podStartSLOduration=3.144982785 podStartE2EDuration="5.550501s" podCreationTimestamp="2025-11-26 00:03:31 +0000 UTC" firstStartedPulling="2025-11-26 00:03:33.491086593 +0000 UTC m=+3869.848745705" lastFinishedPulling="2025-11-26 00:03:35.896604798 +0000 UTC m=+3872.254263920" observedRunningTime="2025-11-26 00:03:36.548075092 +0000 UTC m=+3872.905734204" watchObservedRunningTime="2025-11-26 00:03:36.550501 +0000 UTC m=+3872.908160132" Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.055165 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ntlcp/must-gather-6cj2k"] Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.055974 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" podUID="c1093fd7-5afc-4afd-be60-04a0885dbf62" containerName="copy" containerID="cri-o://8a820af9735619139628ff54fbc2bdaa6e40280d0d9d113343b344096d7c4075" gracePeriod=2 Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.067629 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ntlcp/must-gather-6cj2k"] Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.568870 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ntlcp_must-gather-6cj2k_c1093fd7-5afc-4afd-be60-04a0885dbf62/copy/0.log" Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.569661 5045 generic.go:334] "Generic (PLEG): container finished" podID="c1093fd7-5afc-4afd-be60-04a0885dbf62" containerID="8a820af9735619139628ff54fbc2bdaa6e40280d0d9d113343b344096d7c4075" exitCode=143 Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.569890 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2127dff302f7a989611410a6f015a71021afa63350f09a585be8ecf6f9ae249" Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.607609 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ntlcp_must-gather-6cj2k_c1093fd7-5afc-4afd-be60-04a0885dbf62/copy/0.log" Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.608135 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.704683 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c1093fd7-5afc-4afd-be60-04a0885dbf62-must-gather-output\") pod \"c1093fd7-5afc-4afd-be60-04a0885dbf62\" (UID: \"c1093fd7-5afc-4afd-be60-04a0885dbf62\") " Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.704762 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jd4vp\" (UniqueName: \"kubernetes.io/projected/c1093fd7-5afc-4afd-be60-04a0885dbf62-kube-api-access-jd4vp\") pod \"c1093fd7-5afc-4afd-be60-04a0885dbf62\" (UID: \"c1093fd7-5afc-4afd-be60-04a0885dbf62\") " Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.714353 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1093fd7-5afc-4afd-be60-04a0885dbf62-kube-api-access-jd4vp" (OuterVolumeSpecName: "kube-api-access-jd4vp") pod "c1093fd7-5afc-4afd-be60-04a0885dbf62" (UID: "c1093fd7-5afc-4afd-be60-04a0885dbf62"). InnerVolumeSpecName "kube-api-access-jd4vp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.807440 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jd4vp\" (UniqueName: \"kubernetes.io/projected/c1093fd7-5afc-4afd-be60-04a0885dbf62-kube-api-access-jd4vp\") on node \"crc\" DevicePath \"\"" Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.847175 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1093fd7-5afc-4afd-be60-04a0885dbf62-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "c1093fd7-5afc-4afd-be60-04a0885dbf62" (UID: "c1093fd7-5afc-4afd-be60-04a0885dbf62"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:03:39 crc kubenswrapper[5045]: I1126 00:03:39.909498 5045 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c1093fd7-5afc-4afd-be60-04a0885dbf62-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 26 00:03:40 crc kubenswrapper[5045]: I1126 00:03:40.397204 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:03:40 crc kubenswrapper[5045]: E1126 00:03:40.397969 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:03:40 crc kubenswrapper[5045]: I1126 00:03:40.408075 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1093fd7-5afc-4afd-be60-04a0885dbf62" path="/var/lib/kubelet/pods/c1093fd7-5afc-4afd-be60-04a0885dbf62/volumes" Nov 26 00:03:40 crc kubenswrapper[5045]: I1126 00:03:40.580300 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ntlcp/must-gather-6cj2k" Nov 26 00:03:41 crc kubenswrapper[5045]: I1126 00:03:41.739204 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:41 crc kubenswrapper[5045]: I1126 00:03:41.739534 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:41 crc kubenswrapper[5045]: I1126 00:03:41.797864 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:42 crc kubenswrapper[5045]: I1126 00:03:42.655073 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:42 crc kubenswrapper[5045]: I1126 00:03:42.707502 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-56jl6"] Nov 26 00:03:44 crc kubenswrapper[5045]: I1126 00:03:44.617143 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-56jl6" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerName="registry-server" containerID="cri-o://a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919" gracePeriod=2 Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.208484 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.349998 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-catalog-content\") pod \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.350050 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-utilities\") pod \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.350249 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwd2z\" (UniqueName: \"kubernetes.io/projected/a9bef72a-aee1-42c0-b740-2ec9df18b70e-kube-api-access-hwd2z\") pod \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\" (UID: \"a9bef72a-aee1-42c0-b740-2ec9df18b70e\") " Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.351106 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-utilities" (OuterVolumeSpecName: "utilities") pod "a9bef72a-aee1-42c0-b740-2ec9df18b70e" (UID: "a9bef72a-aee1-42c0-b740-2ec9df18b70e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.354805 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9bef72a-aee1-42c0-b740-2ec9df18b70e-kube-api-access-hwd2z" (OuterVolumeSpecName: "kube-api-access-hwd2z") pod "a9bef72a-aee1-42c0-b740-2ec9df18b70e" (UID: "a9bef72a-aee1-42c0-b740-2ec9df18b70e"). InnerVolumeSpecName "kube-api-access-hwd2z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.417258 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a9bef72a-aee1-42c0-b740-2ec9df18b70e" (UID: "a9bef72a-aee1-42c0-b740-2ec9df18b70e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.452702 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwd2z\" (UniqueName: \"kubernetes.io/projected/a9bef72a-aee1-42c0-b740-2ec9df18b70e-kube-api-access-hwd2z\") on node \"crc\" DevicePath \"\"" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.452752 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.452763 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9bef72a-aee1-42c0-b740-2ec9df18b70e-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.629002 5045 generic.go:334] "Generic (PLEG): container finished" podID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerID="a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919" exitCode=0 Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.629052 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56jl6" event={"ID":"a9bef72a-aee1-42c0-b740-2ec9df18b70e","Type":"ContainerDied","Data":"a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919"} Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.629080 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56jl6" event={"ID":"a9bef72a-aee1-42c0-b740-2ec9df18b70e","Type":"ContainerDied","Data":"88d60177be12fa2ffdcdb29670c7a8dc1c84da94db93a9bc1bfdf9f74cf3d2fd"} Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.629099 5045 scope.go:117] "RemoveContainer" containerID="a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.629140 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-56jl6" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.656365 5045 scope.go:117] "RemoveContainer" containerID="c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.682633 5045 scope.go:117] "RemoveContainer" containerID="54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.691988 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-56jl6"] Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.704458 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-56jl6"] Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.738330 5045 scope.go:117] "RemoveContainer" containerID="a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919" Nov 26 00:03:45 crc kubenswrapper[5045]: E1126 00:03:45.738751 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919\": container with ID starting with a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919 not found: ID does not exist" containerID="a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.738808 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919"} err="failed to get container status \"a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919\": rpc error: code = NotFound desc = could not find container \"a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919\": container with ID starting with a2c9adf59673f026a31c827b56184acacee63a8e8e922134e8763db75ee8c919 not found: ID does not exist" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.738843 5045 scope.go:117] "RemoveContainer" containerID="c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677" Nov 26 00:03:45 crc kubenswrapper[5045]: E1126 00:03:45.739251 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677\": container with ID starting with c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677 not found: ID does not exist" containerID="c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.739317 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677"} err="failed to get container status \"c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677\": rpc error: code = NotFound desc = could not find container \"c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677\": container with ID starting with c24adcae216ee3e368f2bb84b94fcdf75a55f9e2c228567872b9f4fe489bd677 not found: ID does not exist" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.739365 5045 scope.go:117] "RemoveContainer" containerID="54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25" Nov 26 00:03:45 crc kubenswrapper[5045]: E1126 00:03:45.739977 5045 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25\": container with ID starting with 54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25 not found: ID does not exist" containerID="54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25" Nov 26 00:03:45 crc kubenswrapper[5045]: I1126 00:03:45.740010 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25"} err="failed to get container status \"54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25\": rpc error: code = NotFound desc = could not find container \"54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25\": container with ID starting with 54a9f37b951e3c2128d4272be3919609d51e876fe89dca33e677ad647a0d4c25 not found: ID does not exist" Nov 26 00:03:46 crc kubenswrapper[5045]: I1126 00:03:46.419855 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" path="/var/lib/kubelet/pods/a9bef72a-aee1-42c0-b740-2ec9df18b70e/volumes" Nov 26 00:03:53 crc kubenswrapper[5045]: I1126 00:03:53.397764 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:03:53 crc kubenswrapper[5045]: E1126 00:03:53.399232 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:04:04 crc kubenswrapper[5045]: I1126 00:04:04.407355 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:04:04 crc kubenswrapper[5045]: E1126 00:04:04.408175 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:04:16 crc kubenswrapper[5045]: I1126 00:04:16.397314 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:04:16 crc kubenswrapper[5045]: E1126 00:04:16.398255 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:04:22 crc kubenswrapper[5045]: I1126 00:04:22.816430 5045 scope.go:117] "RemoveContainer" containerID="8a820af9735619139628ff54fbc2bdaa6e40280d0d9d113343b344096d7c4075" Nov 26 00:04:22 crc kubenswrapper[5045]: I1126 00:04:22.836460 5045 scope.go:117] "RemoveContainer" 
containerID="e47dbfc15ad8633b5e0c4928a13b4c336077836c25f1206697d496fbebcdc017" Nov 26 00:04:29 crc kubenswrapper[5045]: I1126 00:04:29.396526 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:04:29 crc kubenswrapper[5045]: E1126 00:04:29.397519 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:04:44 crc kubenswrapper[5045]: I1126 00:04:44.409025 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:04:44 crc kubenswrapper[5045]: E1126 00:04:44.409853 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:04:58 crc kubenswrapper[5045]: I1126 00:04:58.396881 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:04:58 crc kubenswrapper[5045]: E1126 00:04:58.398015 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:05:10 crc kubenswrapper[5045]: I1126 00:05:10.397268 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:05:10 crc kubenswrapper[5045]: I1126 00:05:10.770521 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"28b72954faac5d5a2ef5c6de8b1d4873e24cc35ea4d44d3b52d3e1df98dfe607"} Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.014578 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-stwvx"] Nov 26 00:05:40 crc kubenswrapper[5045]: E1126 00:05:40.016974 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerName="extract-utilities" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.017080 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerName="extract-utilities" Nov 26 00:05:40 crc kubenswrapper[5045]: E1126 00:05:40.017168 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1093fd7-5afc-4afd-be60-04a0885dbf62" containerName="copy" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.017223 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1093fd7-5afc-4afd-be60-04a0885dbf62" 
containerName="copy" Nov 26 00:05:40 crc kubenswrapper[5045]: E1126 00:05:40.017287 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerName="extract-content" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.017340 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerName="extract-content" Nov 26 00:05:40 crc kubenswrapper[5045]: E1126 00:05:40.017400 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerName="registry-server" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.017453 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerName="registry-server" Nov 26 00:05:40 crc kubenswrapper[5045]: E1126 00:05:40.017510 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1093fd7-5afc-4afd-be60-04a0885dbf62" containerName="gather" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.017573 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1093fd7-5afc-4afd-be60-04a0885dbf62" containerName="gather" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.017873 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9bef72a-aee1-42c0-b740-2ec9df18b70e" containerName="registry-server" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.017971 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1093fd7-5afc-4afd-be60-04a0885dbf62" containerName="gather" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.018087 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1093fd7-5afc-4afd-be60-04a0885dbf62" containerName="copy" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.019586 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.042292 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-stwvx"] Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.105627 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgpnp\" (UniqueName: \"kubernetes.io/projected/84039b0c-1443-4c69-870f-4ce294bd5741-kube-api-access-tgpnp\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.105800 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-utilities\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.106063 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-catalog-content\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.208217 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgpnp\" (UniqueName: \"kubernetes.io/projected/84039b0c-1443-4c69-870f-4ce294bd5741-kube-api-access-tgpnp\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.208317 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-utilities\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.208367 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-catalog-content\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.208926 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-utilities\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.208954 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-catalog-content\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx" Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.233200 5045 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-tgpnp\" (UniqueName: \"kubernetes.io/projected/84039b0c-1443-4c69-870f-4ce294bd5741-kube-api-access-tgpnp\") pod \"redhat-marketplace-stwvx\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") " pod="openshift-marketplace/redhat-marketplace-stwvx"
Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.351260 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-stwvx"
Nov 26 00:05:40 crc kubenswrapper[5045]: I1126 00:05:40.798344 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-stwvx"]
Nov 26 00:05:41 crc kubenswrapper[5045]: I1126 00:05:41.075558 5045 generic.go:334] "Generic (PLEG): container finished" podID="84039b0c-1443-4c69-870f-4ce294bd5741" containerID="8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde" exitCode=0
Nov 26 00:05:41 crc kubenswrapper[5045]: I1126 00:05:41.075683 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-stwvx" event={"ID":"84039b0c-1443-4c69-870f-4ce294bd5741","Type":"ContainerDied","Data":"8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde"}
Nov 26 00:05:41 crc kubenswrapper[5045]: I1126 00:05:41.075972 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-stwvx" event={"ID":"84039b0c-1443-4c69-870f-4ce294bd5741","Type":"ContainerStarted","Data":"a73017793e85e4968cbae26bb4119af6613e1cf8b03de1962ad0decc11a2e9ad"}
Nov 26 00:05:42 crc kubenswrapper[5045]: I1126 00:05:42.085176 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-stwvx" event={"ID":"84039b0c-1443-4c69-870f-4ce294bd5741","Type":"ContainerStarted","Data":"f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb"}
Nov 26 00:05:43 crc kubenswrapper[5045]: I1126 00:05:43.102230 5045 generic.go:334] "Generic (PLEG): container finished" podID="84039b0c-1443-4c69-870f-4ce294bd5741" containerID="f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb" exitCode=0
Nov 26 00:05:43 crc kubenswrapper[5045]: I1126 00:05:43.102326 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-stwvx" event={"ID":"84039b0c-1443-4c69-870f-4ce294bd5741","Type":"ContainerDied","Data":"f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb"}
Nov 26 00:05:44 crc kubenswrapper[5045]: I1126 00:05:44.116036 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-stwvx" event={"ID":"84039b0c-1443-4c69-870f-4ce294bd5741","Type":"ContainerStarted","Data":"a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e"}
Nov 26 00:05:44 crc kubenswrapper[5045]: I1126 00:05:44.142302 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-stwvx" podStartSLOduration=2.6279912039999997 podStartE2EDuration="5.142278695s" podCreationTimestamp="2025-11-26 00:05:39 +0000 UTC" firstStartedPulling="2025-11-26 00:05:41.077310752 +0000 UTC m=+3997.434969864" lastFinishedPulling="2025-11-26 00:05:43.591598203 +0000 UTC m=+3999.949257355" observedRunningTime="2025-11-26 00:05:44.135075992 +0000 UTC m=+4000.492735104" watchObservedRunningTime="2025-11-26 00:05:44.142278695 +0000 UTC m=+4000.499937837"
Nov 26 00:05:50 crc kubenswrapper[5045]: I1126 00:05:50.351666 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-stwvx"
Nov 26 00:05:50 crc kubenswrapper[5045]: I1126 00:05:50.352334 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-stwvx"
Nov 26 00:05:50 crc kubenswrapper[5045]: I1126 00:05:50.416631 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-stwvx"
Nov 26 00:05:51 crc kubenswrapper[5045]: I1126 00:05:51.252925 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-stwvx"
Nov 26 00:05:51 crc kubenswrapper[5045]: I1126 00:05:51.319457 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-stwvx"]
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.213044 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-stwvx" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" containerName="registry-server" containerID="cri-o://a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e" gracePeriod=2
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.728897 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-stwvx"
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.807444 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgpnp\" (UniqueName: \"kubernetes.io/projected/84039b0c-1443-4c69-870f-4ce294bd5741-kube-api-access-tgpnp\") pod \"84039b0c-1443-4c69-870f-4ce294bd5741\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") "
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.807878 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-utilities\") pod \"84039b0c-1443-4c69-870f-4ce294bd5741\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") "
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.807914 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-catalog-content\") pod \"84039b0c-1443-4c69-870f-4ce294bd5741\" (UID: \"84039b0c-1443-4c69-870f-4ce294bd5741\") "
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.809009 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-utilities" (OuterVolumeSpecName: "utilities") pod "84039b0c-1443-4c69-870f-4ce294bd5741" (UID: "84039b0c-1443-4c69-870f-4ce294bd5741"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.817590 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84039b0c-1443-4c69-870f-4ce294bd5741-kube-api-access-tgpnp" (OuterVolumeSpecName: "kube-api-access-tgpnp") pod "84039b0c-1443-4c69-870f-4ce294bd5741" (UID: "84039b0c-1443-4c69-870f-4ce294bd5741"). InnerVolumeSpecName "kube-api-access-tgpnp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.832496 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84039b0c-1443-4c69-870f-4ce294bd5741" (UID: "84039b0c-1443-4c69-870f-4ce294bd5741"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.910592 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgpnp\" (UniqueName: \"kubernetes.io/projected/84039b0c-1443-4c69-870f-4ce294bd5741-kube-api-access-tgpnp\") on node \"crc\" DevicePath \"\""
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.910645 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 00:05:53 crc kubenswrapper[5045]: I1126 00:05:53.910657 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84039b0c-1443-4c69-870f-4ce294bd5741-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.222906 5045 generic.go:334] "Generic (PLEG): container finished" podID="84039b0c-1443-4c69-870f-4ce294bd5741" containerID="a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e" exitCode=0
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.222948 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-stwvx" event={"ID":"84039b0c-1443-4c69-870f-4ce294bd5741","Type":"ContainerDied","Data":"a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e"}
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.222987 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-stwvx"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.223007 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-stwvx" event={"ID":"84039b0c-1443-4c69-870f-4ce294bd5741","Type":"ContainerDied","Data":"a73017793e85e4968cbae26bb4119af6613e1cf8b03de1962ad0decc11a2e9ad"}
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.223029 5045 scope.go:117] "RemoveContainer" containerID="a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.248232 5045 scope.go:117] "RemoveContainer" containerID="f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.270834 5045 scope.go:117] "RemoveContainer" containerID="8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.275149 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-stwvx"]
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.285682 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-stwvx"]
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.322521 5045 scope.go:117] "RemoveContainer" containerID="a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e"
Nov 26 00:05:54 crc kubenswrapper[5045]: E1126 00:05:54.322838 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e\": container with ID starting with a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e not found: ID does not exist" containerID="a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.322880 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e"} err="failed to get container status \"a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e\": rpc error: code = NotFound desc = could not find container \"a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e\": container with ID starting with a238fbe3e9c2ab3e3263c069005d81ba4f754f9df4e5ecca3642ae2c0a551c6e not found: ID does not exist"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.322901 5045 scope.go:117] "RemoveContainer" containerID="f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb"
Nov 26 00:05:54 crc kubenswrapper[5045]: E1126 00:05:54.324373 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb\": container with ID starting with f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb not found: ID does not exist" containerID="f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.324407 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb"} err="failed to get container status \"f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb\": rpc error: code = NotFound desc = could not find container \"f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb\": container with ID starting with f4e86ecc209876ffc064047b63d6e43f3e7a9f1fb5c781effa31dd1b6d560cbb not found: ID does not exist"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.324425 5045 scope.go:117] "RemoveContainer" containerID="8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde"
Nov 26 00:05:54 crc kubenswrapper[5045]: E1126 00:05:54.324783 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde\": container with ID starting with 8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde not found: ID does not exist" containerID="8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.324819 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde"} err="failed to get container status \"8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde\": rpc error: code = NotFound desc = could not find container \"8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde\": container with ID starting with 8fc1a0fb25c397a7bd874e758582dee58c54e98f61a95442488d5ffb505fcbde not found: ID does not exist"
Nov 26 00:05:54 crc kubenswrapper[5045]: I1126 00:05:54.440510 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" path="/var/lib/kubelet/pods/84039b0c-1443-4c69-870f-4ce294bd5741/volumes"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.071106 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vcnbd/must-gather-5xj6j"]
Nov 26 00:06:17 crc kubenswrapper[5045]: E1126 00:06:17.072154 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" containerName="extract-content"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.072173 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" containerName="extract-content"
Nov 26 00:06:17 crc kubenswrapper[5045]: E1126 00:06:17.072206 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" containerName="registry-server"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.072214 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" containerName="registry-server"
Nov 26 00:06:17 crc kubenswrapper[5045]: E1126 00:06:17.072247 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" containerName="extract-utilities"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.072255 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" containerName="extract-utilities"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.072417 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="84039b0c-1443-4c69-870f-4ce294bd5741" containerName="registry-server"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.073398 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/must-gather-5xj6j"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.075607 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-vcnbd"/"default-dockercfg-tkljg"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.075943 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-vcnbd"/"openshift-service-ca.crt"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.076096 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-vcnbd"/"kube-root-ca.crt"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.081042 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-vcnbd/must-gather-5xj6j"]
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.135977 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-must-gather-output\") pod \"must-gather-5xj6j\" (UID: \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\") " pod="openshift-must-gather-vcnbd/must-gather-5xj6j"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.136040 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m42vk\" (UniqueName: \"kubernetes.io/projected/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-kube-api-access-m42vk\") pod \"must-gather-5xj6j\" (UID: \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\") " pod="openshift-must-gather-vcnbd/must-gather-5xj6j"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.238221 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-must-gather-output\") pod \"must-gather-5xj6j\" (UID: \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\") " pod="openshift-must-gather-vcnbd/must-gather-5xj6j"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.238270 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m42vk\" (UniqueName: \"kubernetes.io/projected/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-kube-api-access-m42vk\") pod \"must-gather-5xj6j\" (UID: \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\") " pod="openshift-must-gather-vcnbd/must-gather-5xj6j"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.238932 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-must-gather-output\") pod \"must-gather-5xj6j\" (UID: \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\") " pod="openshift-must-gather-vcnbd/must-gather-5xj6j"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.263495 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m42vk\" (UniqueName: \"kubernetes.io/projected/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-kube-api-access-m42vk\") pod \"must-gather-5xj6j\" (UID: \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\") " pod="openshift-must-gather-vcnbd/must-gather-5xj6j"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.397892 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/must-gather-5xj6j"
Nov 26 00:06:17 crc kubenswrapper[5045]: I1126 00:06:17.886187 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-vcnbd/must-gather-5xj6j"]
Nov 26 00:06:18 crc kubenswrapper[5045]: I1126 00:06:18.491246 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/must-gather-5xj6j" event={"ID":"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8","Type":"ContainerStarted","Data":"ee8d848ca6a087bc1fda35bb09f5b6f7c002f5d4d4905a968124dd47d380b93b"}
Nov 26 00:06:18 crc kubenswrapper[5045]: I1126 00:06:18.491649 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/must-gather-5xj6j" event={"ID":"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8","Type":"ContainerStarted","Data":"177da48c7721c2b19db290d18d451f9c77259fa3079bf7b6cf00f16597ae1f5e"}
Nov 26 00:06:18 crc kubenswrapper[5045]: I1126 00:06:18.491671 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/must-gather-5xj6j" event={"ID":"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8","Type":"ContainerStarted","Data":"cff22f0d34a3b71bfde6acd41ac9bfa391c6690cd6259cd7222a1b327afedef3"}
Nov 26 00:06:18 crc kubenswrapper[5045]: I1126 00:06:18.511742 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-vcnbd/must-gather-5xj6j" podStartSLOduration=1.511695405 podStartE2EDuration="1.511695405s" podCreationTimestamp="2025-11-26 00:06:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:06:18.506086928 +0000 UTC m=+4034.863746070" watchObservedRunningTime="2025-11-26 00:06:18.511695405 +0000 UTC m=+4034.869354547"
Nov 26 00:06:21 crc kubenswrapper[5045]: I1126 00:06:21.822420 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-rgxs2"]
Nov 26 00:06:21 crc kubenswrapper[5045]: I1126 00:06:21.824798 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:21 crc kubenswrapper[5045]: I1126 00:06:21.934645 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rk7h\" (UniqueName: \"kubernetes.io/projected/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-kube-api-access-6rk7h\") pod \"crc-debug-rgxs2\" (UID: \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\") " pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:21 crc kubenswrapper[5045]: I1126 00:06:21.934697 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-host\") pod \"crc-debug-rgxs2\" (UID: \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\") " pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:22 crc kubenswrapper[5045]: I1126 00:06:22.036262 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rk7h\" (UniqueName: \"kubernetes.io/projected/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-kube-api-access-6rk7h\") pod \"crc-debug-rgxs2\" (UID: \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\") " pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:22 crc kubenswrapper[5045]: I1126 00:06:22.036312 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-host\") pod \"crc-debug-rgxs2\" (UID: \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\") " pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:22 crc kubenswrapper[5045]: I1126 00:06:22.036532 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-host\") pod \"crc-debug-rgxs2\" (UID: \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\") " pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:22 crc kubenswrapper[5045]: I1126 00:06:22.056968 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rk7h\" (UniqueName: \"kubernetes.io/projected/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-kube-api-access-6rk7h\") pod \"crc-debug-rgxs2\" (UID: \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\") " pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:22 crc kubenswrapper[5045]: I1126 00:06:22.148037 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:22 crc kubenswrapper[5045]: I1126 00:06:22.530499 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/crc-debug-rgxs2" event={"ID":"e02cfedf-3e9f-46a2-b046-d1d43c4d5622","Type":"ContainerStarted","Data":"3b7bedc97b3cda78e2393a0aae2b103fbc7fc5da8273c80359f0ab3a3bd2f742"}
Nov 26 00:06:22 crc kubenswrapper[5045]: I1126 00:06:22.530552 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/crc-debug-rgxs2" event={"ID":"e02cfedf-3e9f-46a2-b046-d1d43c4d5622","Type":"ContainerStarted","Data":"ac9be10ced8588daffb968905d38fba932f0af8cf1768358aee66a87471eb943"}
Nov 26 00:06:22 crc kubenswrapper[5045]: I1126 00:06:22.550303 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-vcnbd/crc-debug-rgxs2" podStartSLOduration=1.550283663 podStartE2EDuration="1.550283663s" podCreationTimestamp="2025-11-26 00:06:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:06:22.544645824 +0000 UTC m=+4038.902304956" watchObservedRunningTime="2025-11-26 00:06:22.550283663 +0000 UTC m=+4038.907942785"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.730082 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jmsbh"]
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.732387 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.753677 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jmsbh"]
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.807187 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jf4s4\" (UniqueName: \"kubernetes.io/projected/68a44ad5-dbd7-460c-a449-038cffc19b7e-kube-api-access-jf4s4\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.807265 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-utilities\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.807309 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-catalog-content\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.908652 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jf4s4\" (UniqueName: \"kubernetes.io/projected/68a44ad5-dbd7-460c-a449-038cffc19b7e-kube-api-access-jf4s4\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.908747 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-utilities\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.908790 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-catalog-content\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.909356 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-catalog-content\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.909415 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-utilities\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:30 crc kubenswrapper[5045]: I1126 00:06:30.939073 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jf4s4\" (UniqueName: \"kubernetes.io/projected/68a44ad5-dbd7-460c-a449-038cffc19b7e-kube-api-access-jf4s4\") pod \"certified-operators-jmsbh\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") " pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:31 crc kubenswrapper[5045]: I1126 00:06:31.212938 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:31 crc kubenswrapper[5045]: I1126 00:06:31.753181 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jmsbh"]
Nov 26 00:06:32 crc kubenswrapper[5045]: I1126 00:06:32.611358 5045 generic.go:334] "Generic (PLEG): container finished" podID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerID="8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025" exitCode=0
Nov 26 00:06:32 crc kubenswrapper[5045]: I1126 00:06:32.611456 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmsbh" event={"ID":"68a44ad5-dbd7-460c-a449-038cffc19b7e","Type":"ContainerDied","Data":"8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025"}
Nov 26 00:06:32 crc kubenswrapper[5045]: I1126 00:06:32.611609 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmsbh" event={"ID":"68a44ad5-dbd7-460c-a449-038cffc19b7e","Type":"ContainerStarted","Data":"586ef71b59bd0c43410a7002a8d73c56953f7003ad48457f44b369e1e29cc398"}
Nov 26 00:06:33 crc kubenswrapper[5045]: I1126 00:06:33.622592 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmsbh" event={"ID":"68a44ad5-dbd7-460c-a449-038cffc19b7e","Type":"ContainerStarted","Data":"b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1"}
Nov 26 00:06:35 crc kubenswrapper[5045]: I1126 00:06:35.640270 5045 generic.go:334] "Generic (PLEG): container finished" podID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerID="b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1" exitCode=0
Nov 26 00:06:35 crc kubenswrapper[5045]: I1126 00:06:35.640346 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmsbh" event={"ID":"68a44ad5-dbd7-460c-a449-038cffc19b7e","Type":"ContainerDied","Data":"b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1"}
Nov 26 00:06:37 crc kubenswrapper[5045]: I1126 00:06:37.671555 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmsbh" event={"ID":"68a44ad5-dbd7-460c-a449-038cffc19b7e","Type":"ContainerStarted","Data":"d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61"}
Nov 26 00:06:37 crc kubenswrapper[5045]: I1126 00:06:37.691936 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jmsbh" podStartSLOduration=3.481936155 podStartE2EDuration="7.691912418s" podCreationTimestamp="2025-11-26 00:06:30 +0000 UTC" firstStartedPulling="2025-11-26 00:06:32.613241378 +0000 UTC m=+4048.970900480" lastFinishedPulling="2025-11-26 00:06:36.823217631 +0000 UTC m=+4053.180876743" observedRunningTime="2025-11-26 00:06:37.687845274 +0000 UTC m=+4054.045504386" watchObservedRunningTime="2025-11-26 00:06:37.691912418 +0000 UTC m=+4054.049571530"
Nov 26 00:06:41 crc kubenswrapper[5045]: I1126 00:06:41.214005 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:41 crc kubenswrapper[5045]: I1126 00:06:41.214551 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:41 crc kubenswrapper[5045]: I1126 00:06:41.281193 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:51 crc kubenswrapper[5045]: I1126 00:06:51.270381 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:51 crc kubenswrapper[5045]: I1126 00:06:51.325520 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jmsbh"]
Nov 26 00:06:51 crc kubenswrapper[5045]: I1126 00:06:51.794515 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jmsbh" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerName="registry-server" containerID="cri-o://d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61" gracePeriod=2
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.264236 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.329312 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-catalog-content\") pod \"68a44ad5-dbd7-460c-a449-038cffc19b7e\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") "
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.329346 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-utilities\") pod \"68a44ad5-dbd7-460c-a449-038cffc19b7e\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") "
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.329412 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jf4s4\" (UniqueName: \"kubernetes.io/projected/68a44ad5-dbd7-460c-a449-038cffc19b7e-kube-api-access-jf4s4\") pod \"68a44ad5-dbd7-460c-a449-038cffc19b7e\" (UID: \"68a44ad5-dbd7-460c-a449-038cffc19b7e\") "
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.332710 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-utilities" (OuterVolumeSpecName: "utilities") pod "68a44ad5-dbd7-460c-a449-038cffc19b7e" (UID: "68a44ad5-dbd7-460c-a449-038cffc19b7e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.339166 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68a44ad5-dbd7-460c-a449-038cffc19b7e-kube-api-access-jf4s4" (OuterVolumeSpecName: "kube-api-access-jf4s4") pod "68a44ad5-dbd7-460c-a449-038cffc19b7e" (UID: "68a44ad5-dbd7-460c-a449-038cffc19b7e"). InnerVolumeSpecName "kube-api-access-jf4s4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.408125 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68a44ad5-dbd7-460c-a449-038cffc19b7e" (UID: "68a44ad5-dbd7-460c-a449-038cffc19b7e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.432069 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jf4s4\" (UniqueName: \"kubernetes.io/projected/68a44ad5-dbd7-460c-a449-038cffc19b7e-kube-api-access-jf4s4\") on node \"crc\" DevicePath \"\""
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.432097 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.432107 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a44ad5-dbd7-460c-a449-038cffc19b7e-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.807904 5045 generic.go:334] "Generic (PLEG): container finished" podID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerID="d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61" exitCode=0
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.807960 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmsbh" event={"ID":"68a44ad5-dbd7-460c-a449-038cffc19b7e","Type":"ContainerDied","Data":"d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61"}
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.808015 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jmsbh"
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.808556 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmsbh" event={"ID":"68a44ad5-dbd7-460c-a449-038cffc19b7e","Type":"ContainerDied","Data":"586ef71b59bd0c43410a7002a8d73c56953f7003ad48457f44b369e1e29cc398"}
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.808655 5045 scope.go:117] "RemoveContainer" containerID="d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61"
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.832361 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jmsbh"]
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.842394 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jmsbh"]
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.890639 5045 scope.go:117] "RemoveContainer" containerID="b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1"
Nov 26 00:06:52 crc kubenswrapper[5045]: I1126 00:06:52.918642 5045 scope.go:117] "RemoveContainer" containerID="8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025"
Nov 26 00:06:53 crc kubenswrapper[5045]: I1126 00:06:53.008466 5045 scope.go:117] "RemoveContainer" containerID="d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61"
Nov 26 00:06:53 crc kubenswrapper[5045]: E1126 00:06:53.012414 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61\": container with ID starting with d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61 not found: ID does not exist" containerID="d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61"
Nov 26 00:06:53 crc kubenswrapper[5045]: I1126 00:06:53.012537 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61"} err="failed to get container status \"d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61\": rpc error: code = NotFound desc = could not find container \"d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61\": container with ID starting with d9d713c2355d738203ce8ce5c93bc5ce8f4bbf6ae367b78814d49fb4af766a61 not found: ID does not exist"
Nov 26 00:06:53 crc kubenswrapper[5045]: I1126 00:06:53.012645 5045 scope.go:117] "RemoveContainer" containerID="b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1"
Nov 26 00:06:53 crc kubenswrapper[5045]: E1126 00:06:53.013127 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1\": container with ID starting with b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1 not found: ID does not exist" containerID="b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1"
Nov 26 00:06:53 crc kubenswrapper[5045]: I1126 00:06:53.013166 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1"} err="failed to get container status \"b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1\": rpc error: code = NotFound desc = could not find container \"b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1\": container with ID starting with b94a5d6d5cd392eb8fa0d24ea89af8822f1e71b96680cb2b8914c1504d8101c1 not found: ID does not exist"
Nov 26 00:06:53 crc kubenswrapper[5045]: I1126 00:06:53.013193 5045 scope.go:117] "RemoveContainer" containerID="8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025"
Nov 26 00:06:53 crc kubenswrapper[5045]: E1126 00:06:53.013419 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025\": container with ID starting with 8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025 not found: ID does not exist" containerID="8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025"
Nov 26 00:06:53 crc kubenswrapper[5045]: I1126 00:06:53.013446 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025"} err="failed to get container status \"8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025\": rpc error: code = NotFound desc = could not find container \"8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025\": container with ID starting with 8461b8d205ff89ec5ec3152dfce1af2724ce671df13b85ebb37369340e4e7025 not found: ID does not exist"
Nov 26 00:06:54 crc kubenswrapper[5045]: I1126 00:06:54.409912 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" path="/var/lib/kubelet/pods/68a44ad5-dbd7-460c-a449-038cffc19b7e/volumes"
Nov 26 00:06:56 crc kubenswrapper[5045]: I1126 00:06:56.845161 5045 generic.go:334] "Generic (PLEG): container finished" podID="e02cfedf-3e9f-46a2-b046-d1d43c4d5622" containerID="3b7bedc97b3cda78e2393a0aae2b103fbc7fc5da8273c80359f0ab3a3bd2f742" exitCode=0
Nov 26 00:06:56 crc kubenswrapper[5045]: I1126 00:06:56.845241 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/crc-debug-rgxs2" event={"ID":"e02cfedf-3e9f-46a2-b046-d1d43c4d5622","Type":"ContainerDied","Data":"3b7bedc97b3cda78e2393a0aae2b103fbc7fc5da8273c80359f0ab3a3bd2f742"}
Nov 26 00:06:57 crc kubenswrapper[5045]: I1126 00:06:57.958376 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:57 crc kubenswrapper[5045]: I1126 00:06:57.991860 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-rgxs2"]
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.000335 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-rgxs2"]
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.045336 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-host\") pod \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\" (UID: \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\") "
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.045536 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rk7h\" (UniqueName: \"kubernetes.io/projected/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-kube-api-access-6rk7h\") pod \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\" (UID: \"e02cfedf-3e9f-46a2-b046-d1d43c4d5622\") "
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.046934 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-host" (OuterVolumeSpecName: "host") pod "e02cfedf-3e9f-46a2-b046-d1d43c4d5622" (UID: "e02cfedf-3e9f-46a2-b046-d1d43c4d5622"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.055662 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-kube-api-access-6rk7h" (OuterVolumeSpecName: "kube-api-access-6rk7h") pod "e02cfedf-3e9f-46a2-b046-d1d43c4d5622" (UID: "e02cfedf-3e9f-46a2-b046-d1d43c4d5622"). InnerVolumeSpecName "kube-api-access-6rk7h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.148413 5045 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-host\") on node \"crc\" DevicePath \"\""
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.148481 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rk7h\" (UniqueName: \"kubernetes.io/projected/e02cfedf-3e9f-46a2-b046-d1d43c4d5622-kube-api-access-6rk7h\") on node \"crc\" DevicePath \"\""
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.406535 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e02cfedf-3e9f-46a2-b046-d1d43c4d5622" path="/var/lib/kubelet/pods/e02cfedf-3e9f-46a2-b046-d1d43c4d5622/volumes"
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.869312 5045 scope.go:117] "RemoveContainer" containerID="3b7bedc97b3cda78e2393a0aae2b103fbc7fc5da8273c80359f0ab3a3bd2f742"
Nov 26 00:06:58 crc kubenswrapper[5045]: I1126 00:06:58.869372 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-rgxs2"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.228555 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-7llkk"]
Nov 26 00:06:59 crc kubenswrapper[5045]: E1126 00:06:59.228940 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerName="extract-content"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.228953 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerName="extract-content"
Nov 26 00:06:59 crc kubenswrapper[5045]: E1126 00:06:59.228969 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e02cfedf-3e9f-46a2-b046-d1d43c4d5622" containerName="container-00"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.228975 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="e02cfedf-3e9f-46a2-b046-d1d43c4d5622" containerName="container-00"
Nov 26 00:06:59 crc kubenswrapper[5045]: E1126 00:06:59.228999 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerName="extract-utilities"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.229005 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerName="extract-utilities"
Nov 26 00:06:59 crc kubenswrapper[5045]: E1126 00:06:59.229012 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerName="registry-server"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.229019 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerName="registry-server"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.229224 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="e02cfedf-3e9f-46a2-b046-d1d43c4d5622" containerName="container-00"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.229248 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="68a44ad5-dbd7-460c-a449-038cffc19b7e" containerName="registry-server"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.229967 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.266184 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zlm6\" (UniqueName: \"kubernetes.io/projected/f255f1ff-725a-4f13-8576-1e6b63cf6691-kube-api-access-8zlm6\") pod \"crc-debug-7llkk\" (UID: \"f255f1ff-725a-4f13-8576-1e6b63cf6691\") " pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.266295 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f255f1ff-725a-4f13-8576-1e6b63cf6691-host\") pod \"crc-debug-7llkk\" (UID: \"f255f1ff-725a-4f13-8576-1e6b63cf6691\") " pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.367828 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zlm6\" (UniqueName: \"kubernetes.io/projected/f255f1ff-725a-4f13-8576-1e6b63cf6691-kube-api-access-8zlm6\") pod \"crc-debug-7llkk\" (UID: \"f255f1ff-725a-4f13-8576-1e6b63cf6691\") " pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.367924 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f255f1ff-725a-4f13-8576-1e6b63cf6691-host\") pod \"crc-debug-7llkk\" (UID: \"f255f1ff-725a-4f13-8576-1e6b63cf6691\") " pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.368046 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f255f1ff-725a-4f13-8576-1e6b63cf6691-host\") pod \"crc-debug-7llkk\" (UID: \"f255f1ff-725a-4f13-8576-1e6b63cf6691\") " pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.390771 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zlm6\" (UniqueName: \"kubernetes.io/projected/f255f1ff-725a-4f13-8576-1e6b63cf6691-kube-api-access-8zlm6\") pod \"crc-debug-7llkk\" (UID: \"f255f1ff-725a-4f13-8576-1e6b63cf6691\") " pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.546376 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.883230 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/crc-debug-7llkk" event={"ID":"f255f1ff-725a-4f13-8576-1e6b63cf6691","Type":"ContainerStarted","Data":"dc6642e8db041e6e9ab0d5321a3fd74af3b0aedc837266fc6a3d25b66f7c613a"}
Nov 26 00:06:59 crc kubenswrapper[5045]: I1126 00:06:59.883517 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/crc-debug-7llkk" event={"ID":"f255f1ff-725a-4f13-8576-1e6b63cf6691","Type":"ContainerStarted","Data":"f694783c43d7f28b52aece9d07e33dabdf96b0b8fe9cf6c40b2ac030cfa13649"}
Nov 26 00:07:00 crc kubenswrapper[5045]: I1126 00:07:00.301344 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-7llkk"]
Nov 26 00:07:00 crc kubenswrapper[5045]: I1126 00:07:00.308562 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-7llkk"]
Nov 26 00:07:00 crc kubenswrapper[5045]: I1126 00:07:00.894686 5045 generic.go:334] "Generic (PLEG): container finished" podID="f255f1ff-725a-4f13-8576-1e6b63cf6691" containerID="dc6642e8db041e6e9ab0d5321a3fd74af3b0aedc837266fc6a3d25b66f7c613a" exitCode=0
Nov 26 00:07:00 crc kubenswrapper[5045]: I1126 00:07:00.992391 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.097106 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f255f1ff-725a-4f13-8576-1e6b63cf6691-host\") pod \"f255f1ff-725a-4f13-8576-1e6b63cf6691\" (UID: \"f255f1ff-725a-4f13-8576-1e6b63cf6691\") "
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.097230 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f255f1ff-725a-4f13-8576-1e6b63cf6691-host" (OuterVolumeSpecName: "host") pod "f255f1ff-725a-4f13-8576-1e6b63cf6691" (UID: "f255f1ff-725a-4f13-8576-1e6b63cf6691"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.097283 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zlm6\" (UniqueName: \"kubernetes.io/projected/f255f1ff-725a-4f13-8576-1e6b63cf6691-kube-api-access-8zlm6\") pod \"f255f1ff-725a-4f13-8576-1e6b63cf6691\" (UID: \"f255f1ff-725a-4f13-8576-1e6b63cf6691\") "
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.097782 5045 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f255f1ff-725a-4f13-8576-1e6b63cf6691-host\") on node \"crc\" DevicePath \"\""
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.102738 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f255f1ff-725a-4f13-8576-1e6b63cf6691-kube-api-access-8zlm6" (OuterVolumeSpecName: "kube-api-access-8zlm6") pod "f255f1ff-725a-4f13-8576-1e6b63cf6691" (UID: "f255f1ff-725a-4f13-8576-1e6b63cf6691"). InnerVolumeSpecName "kube-api-access-8zlm6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.199968 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zlm6\" (UniqueName: \"kubernetes.io/projected/f255f1ff-725a-4f13-8576-1e6b63cf6691-kube-api-access-8zlm6\") on node \"crc\" DevicePath \"\""
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.461995 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-f7m8c"]
Nov 26 00:07:01 crc kubenswrapper[5045]: E1126 00:07:01.462392 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f255f1ff-725a-4f13-8576-1e6b63cf6691" containerName="container-00"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.462404 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="f255f1ff-725a-4f13-8576-1e6b63cf6691" containerName="container-00"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.462605 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="f255f1ff-725a-4f13-8576-1e6b63cf6691" containerName="container-00"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.463331 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.505300 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-299sr\" (UniqueName: \"kubernetes.io/projected/64ca8b55-c605-4872-bd75-4bdc6ec640ac-kube-api-access-299sr\") pod \"crc-debug-f7m8c\" (UID: \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\") " pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.505515 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64ca8b55-c605-4872-bd75-4bdc6ec640ac-host\") pod \"crc-debug-f7m8c\" (UID: \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\") " pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.607431 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64ca8b55-c605-4872-bd75-4bdc6ec640ac-host\") pod \"crc-debug-f7m8c\" (UID: \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\") " pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.607873 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-299sr\" (UniqueName: \"kubernetes.io/projected/64ca8b55-c605-4872-bd75-4bdc6ec640ac-kube-api-access-299sr\") pod \"crc-debug-f7m8c\" (UID: \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\") " pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.608278 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64ca8b55-c605-4872-bd75-4bdc6ec640ac-host\") pod \"crc-debug-f7m8c\" (UID: \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\") " pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.623130 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-299sr\" (UniqueName: \"kubernetes.io/projected/64ca8b55-c605-4872-bd75-4bdc6ec640ac-kube-api-access-299sr\") pod \"crc-debug-f7m8c\" (UID: \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\") " pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.782340 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:01 crc kubenswrapper[5045]: W1126 00:07:01.831163 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64ca8b55_c605_4872_bd75_4bdc6ec640ac.slice/crio-788c019d60e2f0284f04b1d44d82eab7240016e540ebde38831973161d6cc395 WatchSource:0}: Error finding container 788c019d60e2f0284f04b1d44d82eab7240016e540ebde38831973161d6cc395: Status 404 returned error can't find the container with id 788c019d60e2f0284f04b1d44d82eab7240016e540ebde38831973161d6cc395
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.909321 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/crc-debug-f7m8c" event={"ID":"64ca8b55-c605-4872-bd75-4bdc6ec640ac","Type":"ContainerStarted","Data":"788c019d60e2f0284f04b1d44d82eab7240016e540ebde38831973161d6cc395"}
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.913443 5045 scope.go:117] "RemoveContainer" containerID="dc6642e8db041e6e9ab0d5321a3fd74af3b0aedc837266fc6a3d25b66f7c613a"
Nov 26 00:07:01 crc kubenswrapper[5045]: I1126 00:07:01.913544 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-7llkk"
Nov 26 00:07:02 crc kubenswrapper[5045]: I1126 00:07:02.405979 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f255f1ff-725a-4f13-8576-1e6b63cf6691" path="/var/lib/kubelet/pods/f255f1ff-725a-4f13-8576-1e6b63cf6691/volumes"
Nov 26 00:07:02 crc kubenswrapper[5045]: I1126 00:07:02.926337 5045 generic.go:334] "Generic (PLEG): container finished" podID="64ca8b55-c605-4872-bd75-4bdc6ec640ac" containerID="faf58d2af023cb4d7950c62c2a0689c25e54665444aabf956848962387cc574f" exitCode=0
Nov 26 00:07:02 crc kubenswrapper[5045]: I1126 00:07:02.926433 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/crc-debug-f7m8c" event={"ID":"64ca8b55-c605-4872-bd75-4bdc6ec640ac","Type":"ContainerDied","Data":"faf58d2af023cb4d7950c62c2a0689c25e54665444aabf956848962387cc574f"}
Nov 26 00:07:02 crc kubenswrapper[5045]: I1126 00:07:02.973262 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-f7m8c"]
Nov 26 00:07:02 crc kubenswrapper[5045]: I1126 00:07:02.981637 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vcnbd/crc-debug-f7m8c"]
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.079316 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.154691 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64ca8b55-c605-4872-bd75-4bdc6ec640ac-host\") pod \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\" (UID: \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\") "
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.154863 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/64ca8b55-c605-4872-bd75-4bdc6ec640ac-host" (OuterVolumeSpecName: "host") pod "64ca8b55-c605-4872-bd75-4bdc6ec640ac" (UID: "64ca8b55-c605-4872-bd75-4bdc6ec640ac"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.155032 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-299sr\" (UniqueName: \"kubernetes.io/projected/64ca8b55-c605-4872-bd75-4bdc6ec640ac-kube-api-access-299sr\") pod \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\" (UID: \"64ca8b55-c605-4872-bd75-4bdc6ec640ac\") "
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.156391 5045 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64ca8b55-c605-4872-bd75-4bdc6ec640ac-host\") on node \"crc\" DevicePath \"\""
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.160860 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64ca8b55-c605-4872-bd75-4bdc6ec640ac-kube-api-access-299sr" (OuterVolumeSpecName: "kube-api-access-299sr") pod "64ca8b55-c605-4872-bd75-4bdc6ec640ac" (UID: "64ca8b55-c605-4872-bd75-4bdc6ec640ac"). InnerVolumeSpecName "kube-api-access-299sr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.258680 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-299sr\" (UniqueName: \"kubernetes.io/projected/64ca8b55-c605-4872-bd75-4bdc6ec640ac-kube-api-access-299sr\") on node \"crc\" DevicePath \"\""
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.412306 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64ca8b55-c605-4872-bd75-4bdc6ec640ac" path="/var/lib/kubelet/pods/64ca8b55-c605-4872-bd75-4bdc6ec640ac/volumes"
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.956111 5045 scope.go:117] "RemoveContainer" containerID="faf58d2af023cb4d7950c62c2a0689c25e54665444aabf956848962387cc574f"
Nov 26 00:07:04 crc kubenswrapper[5045]: I1126 00:07:04.956219 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/crc-debug-f7m8c"
Nov 26 00:07:30 crc kubenswrapper[5045]: I1126 00:07:30.541227 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 00:07:30 crc kubenswrapper[5045]: I1126 00:07:30.541935 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 00:07:50 crc kubenswrapper[5045]: I1126 00:07:50.222839 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6477c784d-pjbws_4cf1306a-a479-4be9-9d81-a24e584294a5/barbican-api/0.log"
Nov 26 00:07:50 crc kubenswrapper[5045]: I1126 00:07:50.409511 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6477c784d-pjbws_4cf1306a-a479-4be9-9d81-a24e584294a5/barbican-api-log/0.log"
Nov 26 00:07:50 crc kubenswrapper[5045]: I1126 00:07:50.440656 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-f86f47df6-cktpw_930a6fb5-dcf0-455c-97a7-5446766b0d01/barbican-keystone-listener/0.log"
Nov 26 00:07:50 crc kubenswrapper[5045]: I1126 00:07:50.528006 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-f86f47df6-cktpw_930a6fb5-dcf0-455c-97a7-5446766b0d01/barbican-keystone-listener-log/0.log"
Nov 26 00:07:50 crc kubenswrapper[5045]: I1126 00:07:50.650699 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-679bb9cf9-plnhs_b77ab75f-32f2-4664-a48e-76699f609a7b/barbican-worker-log/0.log"
Nov 26 00:07:50 crc kubenswrapper[5045]: I1126 00:07:50.651256 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-679bb9cf9-plnhs_b77ab75f-32f2-4664-a48e-76699f609a7b/barbican-worker/0.log"
Nov 26 00:07:50 crc kubenswrapper[5045]: I1126 00:07:50.842378 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-ttvbv_7301c52a-3ce7-478e-867e-6f458de32f19/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 26 00:07:50 crc kubenswrapper[5045]: I1126 00:07:50.946028 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3d111259-fd01-46df-9e3b-e25c7a05f59d/ceilometer-central-agent/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.010336 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3d111259-fd01-46df-9e3b-e25c7a05f59d/ceilometer-notification-agent/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.032606 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3d111259-fd01-46df-9e3b-e25c7a05f59d/proxy-httpd/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.131463 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3d111259-fd01-46df-9e3b-e25c7a05f59d/sg-core/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.263660 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-52524_a98dde15-bff6-4ed8-8216-142372401818/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.375934 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-bxcq5_8e0f7b11-159a-4941-9abe-03adde83a57c/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.478968 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_945c5d6c-f96a-4d6c-a78d-795e26a25699/cinder-api-log/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.535956 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_945c5d6c-f96a-4d6c-a78d-795e26a25699/cinder-api/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.731623 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_d86361ad-5146-475b-b0d4-c505b002904b/probe/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.820729 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-db-purge-29401921-mw666_376db204-9db9-4576-a300-452841866605/cinder-db-purge/0.log"
Nov 26 00:07:51 crc kubenswrapper[5045]: I1126 00:07:51.922425 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_d86361ad-5146-475b-b0d4-c505b002904b/cinder-backup/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.037989 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_efac58ce-5053-4a60-bcd4-41b7c1f483f2/cinder-scheduler/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.138360 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_efac58ce-5053-4a60-bcd4-41b7c1f483f2/probe/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.278272 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_3dcd1a1a-085e-472a-8dab-788bba3c3ce4/probe/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.357528 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_3dcd1a1a-085e-472a-8dab-788bba3c3ce4/cinder-volume/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.452604 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-rjvbb_c9631995-c169-41d0-90cb-9d1566919f23/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.707313 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-pdt9h_6ffaaa5a-fd54-4e1a-8591-6991152aa8de/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.769919 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-4xxtm_1fefc039-452f-4d2f-a4f8-2e73833e05f6/init/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.974615 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-4xxtm_1fefc039-452f-4d2f-a4f8-2e73833e05f6/dnsmasq-dns/0.log"
Nov 26 00:07:52 crc kubenswrapper[5045]: I1126 00:07:52.989003 5045 log.go:25] "Finished parsing log file"
path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-4xxtm_1fefc039-452f-4d2f-a4f8-2e73833e05f6/init/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.032550 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-db-purge-29401921-gs822_14363206-5b08-472d-8fe1-4950a2378e74/glance-dbpurge/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.214688 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_bf6cc533-9827-4132-9d84-50fe49efef41/glance-log/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.257741 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_bf6cc533-9827-4132-9d84-50fe49efef41/glance-httpd/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.414453 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_c46cc706-d7b4-4d2a-b75e-a8bed8a125eb/glance-httpd/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.443444 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_c46cc706-d7b4-4d2a-b75e-a8bed8a125eb/glance-log/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.549424 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-88c75b4b-7vjc8_a78ee35a-ac96-40b7-b9aa-92bdaadf339b/horizon/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.661260 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-knvng_4dcec4cb-07ca-4c0a-afaa-672e534cf521/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.693594 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-88c75b4b-7vjc8_a78ee35a-ac96-40b7-b9aa-92bdaadf339b/horizon-log/0.log" Nov 26 00:07:53 crc kubenswrapper[5045]: I1126 00:07:53.954558 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-lzmxw_f0795a52-0ba0-497f-a55c-8888a54c0fa8/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.062529 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-68c57f7894-dq5jz_e85b05a0-5d74-4df9-b09c-a68596f45b6e/keystone-api/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.099835 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401921-6p59q_b5251d69-bf33-43f9-ad5f-ea0937a59e10/keystone-cron/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.162881 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_9c3528ab-a9a5-4209-ae3c-b0fc8f9e24d2/kube-state-metrics/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.278000 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-gfpv5_886ae822-5a4e-4578-a137-1322687c1a77/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.387164 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_0a1b4a8a-456e-4756-8321-079731f5f729/manila-api-log/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.521097 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_manila-db-purge-29401921-6pglr_cef47d90-76d7-4f52-8583-94f0decfd788/manila-db-purge/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.585155 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_0a1b4a8a-456e-4756-8321-079731f5f729/manila-api/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.748388 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_3b78dcef-2652-44a3-8d97-cb40f963d225/manila-scheduler/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.761850 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_3b78dcef-2652-44a3-8d97-cb40f963d225/probe/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.840221 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_ce03c0a7-a5a4-48df-8d94-5d3c7464efc1/manila-share/0.log" Nov 26 00:07:54 crc kubenswrapper[5045]: I1126 00:07:54.926662 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_ce03c0a7-a5a4-48df-8d94-5d3c7464efc1/probe/0.log" Nov 26 00:07:55 crc kubenswrapper[5045]: I1126 00:07:55.125029 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5db8cdc695-2sz2g_6970371c-e072-49ea-97b5-a6bed28d5372/neutron-api/0.log" Nov 26 00:07:55 crc kubenswrapper[5045]: I1126 00:07:55.172397 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5db8cdc695-2sz2g_6970371c-e072-49ea-97b5-a6bed28d5372/neutron-httpd/0.log" Nov 26 00:07:55 crc kubenswrapper[5045]: I1126 00:07:55.325909 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-jd4v6_1f6014b1-5500-49d6-a729-dfb677b8a1cc/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:07:55 crc kubenswrapper[5045]: I1126 00:07:55.547540 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_65d3b56d-211c-4392-b6ee-449e68d546a0/nova-api-log/0.log" Nov 26 00:07:55 crc kubenswrapper[5045]: I1126 00:07:55.803036 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-db-purge-29401920-5drnw_ce1d9883-706a-4df4-82e8-255c03424e7a/nova-manage/0.log" Nov 26 00:07:55 crc kubenswrapper[5045]: I1126 00:07:55.833600 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_98d79db2-7dcf-4b5a-8d2b-bd1a799b843f/nova-cell0-conductor-conductor/0.log" Nov 26 00:07:55 crc kubenswrapper[5045]: I1126 00:07:55.839422 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_65d3b56d-211c-4392-b6ee-449e68d546a0/nova-api-api/0.log" Nov 26 00:07:56 crc kubenswrapper[5045]: I1126 00:07:56.200267 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-db-purge-29401920-qtzfs_dc4aaa94-7a82-43c5-a8d0-da8ab7370ea1/nova-manage/0.log" Nov 26 00:07:56 crc kubenswrapper[5045]: I1126 00:07:56.381700 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_cf45160d-7c2c-4d8c-81da-db68ed300d2d/nova-cell1-conductor-conductor/0.log" Nov 26 00:07:56 crc kubenswrapper[5045]: I1126 00:07:56.498947 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_4a276597-ed77-4cd6-95b0-57e64be23060/nova-cell1-novncproxy-novncproxy/0.log" Nov 26 00:07:56 crc kubenswrapper[5045]: I1126 00:07:56.639740 5045 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pc8fk_2a2c3598-f1cb-4b09-8410-48442361a88a/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:07:56 crc kubenswrapper[5045]: I1126 00:07:56.787753 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4af60a2e-c5d2-4f99-912d-8c269561a2e0/nova-metadata-log/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.113831 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_4fc80e12-1f82-458d-a8c1-4e7625a9381c/nova-scheduler-scheduler/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.130242 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_abaa26fa-f1a8-4249-8179-ad1b64334be5/mysql-bootstrap/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.264739 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_abaa26fa-f1a8-4249-8179-ad1b64334be5/mysql-bootstrap/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.376284 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_abaa26fa-f1a8-4249-8179-ad1b64334be5/galera/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.463972 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_981d9260-fe05-4c33-9c46-c65a7a31c7b1/mysql-bootstrap/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.656654 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_981d9260-fe05-4c33-9c46-c65a7a31c7b1/mysql-bootstrap/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.693888 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_981d9260-fe05-4c33-9c46-c65a7a31c7b1/galera/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.825340 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_7909b434-b7ea-46af-8c4e-b5454df0ba0f/openstackclient/0.log" Nov 26 00:07:57 crc kubenswrapper[5045]: I1126 00:07:57.949469 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-kfwsl_efb42386-0a5d-423f-b31e-13e9433271ba/ovn-controller/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.136979 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wrcbr_a5ad3a64-612a-442a-beed-2dcf6303b974/openstack-network-exporter/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.178661 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_95d30530-9d52-442a-94e0-3e85871f0c4f/memcached/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.183185 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4af60a2e-c5d2-4f99-912d-8c269561a2e0/nova-metadata-metadata/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.272238 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-z2vw6_1765d304-e95f-43d5-9655-84e468fe332e/ovsdb-server-init/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.462076 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-z2vw6_1765d304-e95f-43d5-9655-84e468fe332e/ovs-vswitchd/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.464922 
5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-z2vw6_1765d304-e95f-43d5-9655-84e468fe332e/ovsdb-server-init/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.505960 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-z2vw6_1765d304-e95f-43d5-9655-84e468fe332e/ovsdb-server/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.547300 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-2p27n_9951b664-25eb-49a9-ba49-6bd594f857df/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.645349 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f7cd8ff8-57e1-4909-b7c7-93f707770aaa/openstack-network-exporter/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.689623 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f7cd8ff8-57e1-4909-b7c7-93f707770aaa/ovn-northd/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.734550 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3f0b6c93-84c1-4b4a-8d1d-844d035fe867/openstack-network-exporter/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.844917 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3f0b6c93-84c1-4b4a-8d1d-844d035fe867/ovsdbserver-nb/0.log" Nov 26 00:07:58 crc kubenswrapper[5045]: I1126 00:07:58.985636 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_dca2a357-2bb3-4400-a74b-5ec428e7a710/openstack-network-exporter/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.013767 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_dca2a357-2bb3-4400-a74b-5ec428e7a710/ovsdbserver-sb/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.074103 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-65cddd5cf6-fpz78_3a86c07d-787b-4255-8861-e1c03bc78303/placement-api/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.140085 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-65cddd5cf6-fpz78_3a86c07d-787b-4255-8861-e1c03bc78303/placement-log/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.224138 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_74ac97d4-b89e-47c2-b249-9e70da06d165/setup-container/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.425216 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_74ac97d4-b89e-47c2-b249-9e70da06d165/setup-container/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.452072 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_73cc82f8-4d6b-4608-9881-664a8194fc6f/setup-container/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.455664 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_74ac97d4-b89e-47c2-b249-9e70da06d165/rabbitmq/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.764671 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_73cc82f8-4d6b-4608-9881-664a8194fc6f/rabbitmq/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.772288 5045 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_73cc82f8-4d6b-4608-9881-664a8194fc6f/setup-container/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.778962 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-hzvgc_b7c3894b-8400-492c-b4b9-b45ec555cc68/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:07:59 crc kubenswrapper[5045]: I1126 00:07:59.954669 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-v9tjr_06f796e9-76e4-4067-915d-4efd6e38226a/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:08:00 crc kubenswrapper[5045]: I1126 00:08:00.004005 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-fbvf9_96f74290-34d3-41c3-8088-09e598444e2e/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:08:00 crc kubenswrapper[5045]: I1126 00:08:00.069698 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-cpmnq_7747bcca-12c7-4cf7-82e3-2a554f853dce/ssh-known-hosts-edpm-deployment/0.log" Nov 26 00:08:00 crc kubenswrapper[5045]: I1126 00:08:00.156907 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_44c7f66f-9267-4165-8d51-e8fa7b33b354/tempest-tests-tempest-tests-runner/0.log" Nov 26 00:08:00 crc kubenswrapper[5045]: I1126 00:08:00.244405 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_856da688-70c4-4b00-9b63-0d93aee55d2b/test-operator-logs-container/0.log" Nov 26 00:08:00 crc kubenswrapper[5045]: I1126 00:08:00.320055 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-8fzqz_8e685995-6390-45d1-948f-9aa20cef1060/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 00:08:00 crc kubenswrapper[5045]: I1126 00:08:00.540754 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:08:00 crc kubenswrapper[5045]: I1126 00:08:00.540812 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:08:24 crc kubenswrapper[5045]: I1126 00:08:24.598397 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/util/0.log" Nov 26 00:08:24 crc kubenswrapper[5045]: I1126 00:08:24.760988 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/pull/0.log" Nov 26 00:08:24 crc kubenswrapper[5045]: I1126 00:08:24.790331 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/util/0.log" Nov 26 
00:08:24 crc kubenswrapper[5045]: I1126 00:08:24.791261 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/pull/0.log" Nov 26 00:08:24 crc kubenswrapper[5045]: I1126 00:08:24.937296 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/util/0.log" Nov 26 00:08:24 crc kubenswrapper[5045]: I1126 00:08:24.938231 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/pull/0.log" Nov 26 00:08:24 crc kubenswrapper[5045]: I1126 00:08:24.982060 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1f7b3845ca78f4fcb4a22c62df2ef925b3de5ab0d549d9d06ec1525bd2pqw92_a95c5e20-6f36-41d3-a164-1768574075ee/extract/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.118297 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hmwhx_e287d4e5-6925-42eb-a661-fded8259123f/kube-rbac-proxy/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.197496 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hmwhx_e287d4e5-6925-42eb-a661-fded8259123f/manager/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.230009 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-tkwkz_f1030448-0fd8-42d3-9a83-2e27d87c855e/kube-rbac-proxy/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.366032 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-tkwkz_f1030448-0fd8-42d3-9a83-2e27d87c855e/manager/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.415475 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-npq84_344fd1ea-983e-4515-aa8a-479ec0c46c81/manager/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.446733 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-npq84_344fd1ea-983e-4515-aa8a-479ec0c46c81/kube-rbac-proxy/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.640669 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-76f7fdd9bd-84dxm_39fa03d1-e77d-46bd-bc4c-d83960611145/kube-rbac-proxy/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.660849 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-76f7fdd9bd-84dxm_39fa03d1-e77d-46bd-bc4c-d83960611145/manager/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.787661 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-8mpkk_a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b/kube-rbac-proxy/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.839550 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-8mpkk_a1596ec7-ab5c-4e5e-ba3a-11772bdaa64b/manager/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.920593 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-qzhwg_20abfff9-9e94-466e-a2bc-487a231b86a5/kube-rbac-proxy/0.log" Nov 26 00:08:25 crc kubenswrapper[5045]: I1126 00:08:25.961032 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-qzhwg_20abfff9-9e94-466e-a2bc-487a231b86a5/manager/0.log" Nov 26 00:08:26 crc kubenswrapper[5045]: I1126 00:08:26.134283 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-dhjmg_cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16/kube-rbac-proxy/0.log" Nov 26 00:08:26 crc kubenswrapper[5045]: I1126 00:08:26.277584 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-mb2mk_2b6f436b-9a87-463e-a7ca-48ae08ba5f10/kube-rbac-proxy/0.log" Nov 26 00:08:26 crc kubenswrapper[5045]: I1126 00:08:26.344233 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-dhjmg_cf4cce85-c60e-4a1a-85f8-4ffaa4d80d16/manager/0.log" Nov 26 00:08:26 crc kubenswrapper[5045]: I1126 00:08:26.376896 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-mb2mk_2b6f436b-9a87-463e-a7ca-48ae08ba5f10/manager/0.log" Nov 26 00:08:26 crc kubenswrapper[5045]: I1126 00:08:26.486629 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-7rzvl_4c3613c7-39a6-46b5-82da-a461d37d8965/kube-rbac-proxy/0.log" Nov 26 00:08:26 crc kubenswrapper[5045]: I1126 00:08:26.569102 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-7rzvl_4c3613c7-39a6-46b5-82da-a461d37d8965/manager/0.log" Nov 26 00:08:26 crc kubenswrapper[5045]: I1126 00:08:26.648690 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-q7rbk_289a9811-aa55-449c-aa82-a56f4b1ef53e/kube-rbac-proxy/0.log" Nov 26 00:08:26 crc kubenswrapper[5045]: I1126 00:08:26.755741 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-q7rbk_289a9811-aa55-449c-aa82-a56f4b1ef53e/manager/0.log" Nov 26 00:08:27 crc kubenswrapper[5045]: I1126 00:08:27.022389 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-phr8m_485f4bbf-205f-4ea5-8009-a0cdeb204139/kube-rbac-proxy/0.log" Nov 26 00:08:27 crc kubenswrapper[5045]: I1126 00:08:27.033801 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-phr8m_485f4bbf-205f-4ea5-8009-a0cdeb204139/manager/0.log" Nov 26 00:08:27 crc kubenswrapper[5045]: I1126 00:08:27.203077 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-m98w9_38cf0eb2-2d59-418b-9e24-b04c72c58c9f/kube-rbac-proxy/0.log" Nov 26 00:08:27 crc kubenswrapper[5045]: I1126 00:08:27.277505 5045 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-m98w9_38cf0eb2-2d59-418b-9e24-b04c72c58c9f/manager/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.120142 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-tkgqg_4b50dd58-f8a3-4ce4-b008-dd810e1a424d/manager/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.149198 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-tkgqg_4b50dd58-f8a3-4ce4-b008-dd810e1a424d/kube-rbac-proxy/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.199895 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-g4pvl_bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e/kube-rbac-proxy/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.288185 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-g4pvl_bbcd8ad7-d4b2-4eef-979f-00b2c4e1b13e/manager/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.326140 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r_c5f48852-fbb4-429b-93c8-19121a51be4a/kube-rbac-proxy/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.377392 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6brps5r_c5f48852-fbb4-429b-93c8-19121a51be4a/manager/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.651812 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-bl9nc_59064a60-9836-4862-8b6b-ba68ce13975d/registry-server/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.783875 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7d45d649c4-bccwd_fc249ddc-e18a-4677-8e37-7b7d449876d9/operator/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.805473 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-rk9dz_35229e25-460b-4c57-9dae-6dceadf19b3f/kube-rbac-proxy/0.log" Nov 26 00:08:28 crc kubenswrapper[5045]: I1126 00:08:28.934411 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-rk9dz_35229e25-460b-4c57-9dae-6dceadf19b3f/manager/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.018337 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-2pfmh_da63a4ac-64af-4d60-b968-274c9960b665/kube-rbac-proxy/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.071371 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-2pfmh_da63a4ac-64af-4d60-b968-274c9960b665/manager/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.231542 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-85bpk_6c15a559-c39e-47a5-83b2-74a6e830c1b2/operator/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.674050 
5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-746744c96-rtr9q_aeeca19c-1da3-4bc7-934d-fa4c8663ca04/manager/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.734260 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-n7s8v_9092d9d6-3e10-4f43-84e7-121153c39104/kube-rbac-proxy/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.791247 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-n7s8v_9092d9d6-3e10-4f43-84e7-121153c39104/manager/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.796174 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-jzkxl_39ce2a8b-211e-4bb4-91a3-0999e4f45162/kube-rbac-proxy/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.923906 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-jzkxl_39ce2a8b-211e-4bb4-91a3-0999e4f45162/manager/0.log" Nov 26 00:08:29 crc kubenswrapper[5045]: I1126 00:08:29.970474 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-vsfg5_74cee554-ae39-4dd6-b932-dc432e32cda0/kube-rbac-proxy/0.log" Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.207923 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-vsfg5_74cee554-ae39-4dd6-b932-dc432e32cda0/manager/0.log" Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.324765 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-r4wcc_eb320624-b1a8-45b6-891f-0b4517a5376e/kube-rbac-proxy/0.log" Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.357951 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-r4wcc_eb320624-b1a8-45b6-891f-0b4517a5376e/manager/0.log" Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.540736 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.540801 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.540847 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.541625 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"28b72954faac5d5a2ef5c6de8b1d4873e24cc35ea4d44d3b52d3e1df98dfe607"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container 
machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.541681 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://28b72954faac5d5a2ef5c6de8b1d4873e24cc35ea4d44d3b52d3e1df98dfe607" gracePeriod=600 Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.792656 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="28b72954faac5d5a2ef5c6de8b1d4873e24cc35ea4d44d3b52d3e1df98dfe607" exitCode=0 Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.792761 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"28b72954faac5d5a2ef5c6de8b1d4873e24cc35ea4d44d3b52d3e1df98dfe607"} Nov 26 00:08:30 crc kubenswrapper[5045]: I1126 00:08:30.793085 5045 scope.go:117] "RemoveContainer" containerID="cb048626d0ac5a5071d5ed9400de0298a7313be695e0c97222858254ff7fe68b" Nov 26 00:08:31 crc kubenswrapper[5045]: I1126 00:08:31.819113 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerStarted","Data":"04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"} Nov 26 00:08:50 crc kubenswrapper[5045]: I1126 00:08:50.015186 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-955fw_7e777e65-538b-4823-abd9-f6c387f3fba3/control-plane-machine-set-operator/0.log" Nov 26 00:08:50 crc kubenswrapper[5045]: I1126 00:08:50.049062 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tst4h_601df050-5421-4266-bf7c-60096a066a24/kube-rbac-proxy/0.log" Nov 26 00:08:50 crc kubenswrapper[5045]: I1126 00:08:50.182776 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tst4h_601df050-5421-4266-bf7c-60096a066a24/machine-api-operator/0.log" Nov 26 00:09:02 crc kubenswrapper[5045]: I1126 00:09:02.051746 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-8rvmp_581be194-4f18-4d0b-82fe-da014d72e03d/cert-manager-controller/0.log" Nov 26 00:09:02 crc kubenswrapper[5045]: I1126 00:09:02.140023 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-tj5h4_ca7e1b0d-b147-46cb-9537-6026becd4866/cert-manager-cainjector/0.log" Nov 26 00:09:02 crc kubenswrapper[5045]: I1126 00:09:02.255308 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-lpn7b_9f002a66-6d5b-49a1-881d-5ed4deb1a006/cert-manager-webhook/0.log" Nov 26 00:09:15 crc kubenswrapper[5045]: I1126 00:09:15.832650 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-srctj_6d561240-e484-4a96-aff1-aef1a5c56daf/nmstate-console-plugin/0.log" Nov 26 00:09:15 crc kubenswrapper[5045]: I1126 00:09:15.977691 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-gmpgg_523926ce-7459-44b3-bc6b-03782619bc1e/nmstate-handler/0.log" 
Nov 26 00:09:16 crc kubenswrapper[5045]: I1126 00:09:16.055336 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-2fvh8_10586198-1de3-4da4-9ba1-b79a9785da2f/kube-rbac-proxy/0.log" Nov 26 00:09:16 crc kubenswrapper[5045]: I1126 00:09:16.059474 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-2fvh8_10586198-1de3-4da4-9ba1-b79a9785da2f/nmstate-metrics/0.log" Nov 26 00:09:16 crc kubenswrapper[5045]: I1126 00:09:16.219086 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-cn5kf_64863e5b-fa79-4a6f-af83-631dafa8a1c4/nmstate-operator/0.log" Nov 26 00:09:16 crc kubenswrapper[5045]: I1126 00:09:16.232377 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-6vs48_8d0f4d8f-2c1d-4bdf-9aa0-2971cf03df73/nmstate-webhook/0.log" Nov 26 00:09:32 crc kubenswrapper[5045]: I1126 00:09:32.476380 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-4g95c_e22db1cb-76eb-4541-8098-95b688ccbe00/kube-rbac-proxy/0.log" Nov 26 00:09:32 crc kubenswrapper[5045]: I1126 00:09:32.673772 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-4g95c_e22db1cb-76eb-4541-8098-95b688ccbe00/controller/0.log" Nov 26 00:09:32 crc kubenswrapper[5045]: I1126 00:09:32.724150 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-frr-files/0.log" Nov 26 00:09:32 crc kubenswrapper[5045]: I1126 00:09:32.905778 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-reloader/0.log" Nov 26 00:09:32 crc kubenswrapper[5045]: I1126 00:09:32.906969 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-frr-files/0.log" Nov 26 00:09:32 crc kubenswrapper[5045]: I1126 00:09:32.918290 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-metrics/0.log" Nov 26 00:09:32 crc kubenswrapper[5045]: I1126 00:09:32.978145 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-reloader/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.149615 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-frr-files/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.195646 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-metrics/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.202138 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-metrics/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.202140 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-reloader/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.385900 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-frr-files/0.log" Nov 26 00:09:33 crc 
kubenswrapper[5045]: I1126 00:09:33.399191 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-reloader/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.416781 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/cp-metrics/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.440423 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/controller/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.590496 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/frr-metrics/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.625540 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/kube-rbac-proxy/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.666080 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/kube-rbac-proxy-frr/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.811186 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/reloader/0.log" Nov 26 00:09:33 crc kubenswrapper[5045]: I1126 00:09:33.891075 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-5l6nm_9c4acc23-28a7-432a-8a17-63550727f1a6/frr-k8s-webhook-server/0.log" Nov 26 00:09:34 crc kubenswrapper[5045]: I1126 00:09:34.044962 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-796ffbd7cd-282k4_211004af-dcb4-4397-bced-fd0c3e3da2a3/manager/0.log" Nov 26 00:09:34 crc kubenswrapper[5045]: I1126 00:09:34.257753 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7cfb757c46-6g84d_aa1a672e-f330-4cef-bf0f-c471b30ac61d/webhook-server/0.log" Nov 26 00:09:34 crc kubenswrapper[5045]: I1126 00:09:34.337961 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sbr7m_fcd4cd5e-d7b9-4666-8eac-781cee36189a/kube-rbac-proxy/0.log" Nov 26 00:09:34 crc kubenswrapper[5045]: I1126 00:09:34.885092 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sbr7m_fcd4cd5e-d7b9-4666-8eac-781cee36189a/speaker/0.log" Nov 26 00:09:35 crc kubenswrapper[5045]: I1126 00:09:35.151295 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-p6hjd_6bab5b2e-6c1e-4464-b7d8-973e76401ba5/frr/0.log" Nov 26 00:09:48 crc kubenswrapper[5045]: I1126 00:09:48.461197 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/util/0.log" Nov 26 00:09:48 crc kubenswrapper[5045]: I1126 00:09:48.944330 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/pull/0.log" Nov 26 00:09:48 crc kubenswrapper[5045]: I1126 00:09:48.949139 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/pull/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.061186 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/util/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.101200 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/util/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.128890 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/extract/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.155407 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772etp5v2_2fc73dbf-cedf-448c-9c04-bd1db878524a/pull/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.264667 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-utilities/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.431104 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-content/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.445068 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-content/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.462580 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-utilities/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.641620 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-utilities/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.658402 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/extract-content/0.log" Nov 26 00:09:49 crc kubenswrapper[5045]: I1126 00:09:49.872042 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-utilities/0.log" Nov 26 00:09:50 crc kubenswrapper[5045]: I1126 00:09:50.125941 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-content/0.log" Nov 26 00:09:50 crc kubenswrapper[5045]: I1126 00:09:50.139860 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9k62x_c53fe041-30c1-448d-9eaa-1db8e0163b83/registry-server/0.log" Nov 26 00:09:50 crc kubenswrapper[5045]: I1126 00:09:50.149974 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-utilities/0.log" Nov 26 00:09:50 crc kubenswrapper[5045]: I1126 00:09:50.215077 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-content/0.log" Nov 26 00:09:50 crc kubenswrapper[5045]: I1126 00:09:50.634567 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-utilities/0.log" Nov 26 00:09:50 crc kubenswrapper[5045]: I1126 00:09:50.677539 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/extract-content/0.log" Nov 26 00:09:50 crc kubenswrapper[5045]: I1126 00:09:50.886301 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/util/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.213733 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/pull/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.267823 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4v65k_78a1fdd9-da9e-445f-9d75-167eff9d37a9/registry-server/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.287677 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/pull/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.374201 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/util/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.509087 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/util/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.522153 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/extract/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.522298 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6b47tv_95a24d51-cffe-4618-b4f0-324815a1848e/pull/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.737755 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-f5g5t_292cc94b-5ed6-4491-8168-1ac68858f418/marketplace-operator/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.776892 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-utilities/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.913282 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-content/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.915651 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-utilities/0.log" Nov 26 00:09:51 crc kubenswrapper[5045]: I1126 00:09:51.922490 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-content/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.270027 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-content/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.317883 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/extract-utilities/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.401766 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-utilities/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.472211 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bnp6n_3913a533-c2e2-4737-b52e-b90f29f979a5/registry-server/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.513357 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-content/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.530230 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-utilities/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.611509 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-content/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.743974 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-utilities/0.log" Nov 26 00:09:52 crc kubenswrapper[5045]: I1126 00:09:52.756132 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/extract-content/0.log" Nov 26 00:09:53 crc kubenswrapper[5045]: I1126 00:09:53.215566 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bqxg8_6fc579e4-208c-4708-aa65-f7b2262eada1/registry-server/0.log" Nov 26 00:10:30 crc kubenswrapper[5045]: I1126 00:10:30.540665 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:10:30 crc kubenswrapper[5045]: I1126 00:10:30.541266 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:11:00 crc kubenswrapper[5045]: I1126 00:11:00.540746 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:11:00 crc kubenswrapper[5045]: I1126 00:11:00.541427 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:11:30 crc kubenswrapper[5045]: I1126 00:11:30.540575 5045 patch_prober.go:28] interesting pod/machine-config-daemon-7dpm4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:11:30 crc kubenswrapper[5045]: I1126 00:11:30.541149 5045 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:11:30 crc kubenswrapper[5045]: I1126 00:11:30.541194 5045 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" Nov 26 00:11:30 crc kubenswrapper[5045]: I1126 00:11:30.541936 5045 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"} pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:11:30 crc kubenswrapper[5045]: I1126 00:11:30.541989 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerName="machine-config-daemon" containerID="cri-o://04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" gracePeriod=600 Nov 26 00:11:30 crc kubenswrapper[5045]: E1126 00:11:30.665991 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:11:31 crc kubenswrapper[5045]: I1126 00:11:31.629017 5045 generic.go:334] "Generic (PLEG): container finished" podID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" exitCode=0 Nov 26 00:11:31 crc kubenswrapper[5045]: I1126 00:11:31.629077 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" event={"ID":"bc394db7-8b38-4abe-841d-83a3ea3d07b0","Type":"ContainerDied","Data":"04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"} Nov 26 00:11:31 crc kubenswrapper[5045]: I1126 00:11:31.629553 5045 scope.go:117] "RemoveContainer" containerID="28b72954faac5d5a2ef5c6de8b1d4873e24cc35ea4d44d3b52d3e1df98dfe607" Nov 26 00:11:31 crc kubenswrapper[5045]: I1126 00:11:31.630527 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:11:31 crc kubenswrapper[5045]: E1126 00:11:31.631115 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:11:37 crc kubenswrapper[5045]: I1126 00:11:37.691669 5045 generic.go:334] "Generic (PLEG): container finished" podID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerID="177da48c7721c2b19db290d18d451f9c77259fa3079bf7b6cf00f16597ae1f5e" exitCode=0 Nov 26 00:11:37 crc kubenswrapper[5045]: I1126 00:11:37.691810 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vcnbd/must-gather-5xj6j" event={"ID":"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8","Type":"ContainerDied","Data":"177da48c7721c2b19db290d18d451f9c77259fa3079bf7b6cf00f16597ae1f5e"} Nov 26 00:11:37 crc kubenswrapper[5045]: I1126 00:11:37.692891 5045 scope.go:117] "RemoveContainer" containerID="177da48c7721c2b19db290d18d451f9c77259fa3079bf7b6cf00f16597ae1f5e" Nov 26 00:11:38 crc kubenswrapper[5045]: I1126 00:11:38.332452 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vcnbd_must-gather-5xj6j_3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8/gather/0.log" Nov 26 00:11:46 crc kubenswrapper[5045]: I1126 00:11:46.397616 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:11:46 crc kubenswrapper[5045]: E1126 00:11:46.399341 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:11:48 crc kubenswrapper[5045]: I1126 00:11:48.460862 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vcnbd/must-gather-5xj6j"] Nov 26 00:11:48 crc kubenswrapper[5045]: I1126 00:11:48.461533 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-vcnbd/must-gather-5xj6j" podUID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerName="copy" containerID="cri-o://ee8d848ca6a087bc1fda35bb09f5b6f7c002f5d4d4905a968124dd47d380b93b" gracePeriod=2 Nov 26 00:11:48 crc kubenswrapper[5045]: I1126 00:11:48.470076 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vcnbd/must-gather-5xj6j"] Nov 26 00:11:48 crc kubenswrapper[5045]: I1126 00:11:48.815370 5045 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-vcnbd_must-gather-5xj6j_3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8/copy/0.log" Nov 26 00:11:48 crc kubenswrapper[5045]: I1126 00:11:48.816303 5045 generic.go:334] "Generic (PLEG): container finished" podID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerID="ee8d848ca6a087bc1fda35bb09f5b6f7c002f5d4d4905a968124dd47d380b93b" exitCode=143 Nov 26 00:11:48 crc kubenswrapper[5045]: I1126 00:11:48.982145 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vcnbd_must-gather-5xj6j_3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8/copy/0.log" Nov 26 00:11:48 crc kubenswrapper[5045]: I1126 00:11:48.982638 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vcnbd/must-gather-5xj6j" Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.018890 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m42vk\" (UniqueName: \"kubernetes.io/projected/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-kube-api-access-m42vk\") pod \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\" (UID: \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\") " Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.018991 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-must-gather-output\") pod \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\" (UID: \"3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8\") " Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.024944 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-kube-api-access-m42vk" (OuterVolumeSpecName: "kube-api-access-m42vk") pod "3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" (UID: "3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8"). InnerVolumeSpecName "kube-api-access-m42vk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.121350 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m42vk\" (UniqueName: \"kubernetes.io/projected/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-kube-api-access-m42vk\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.178802 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" (UID: "3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.222579 5045 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.830459 5045 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vcnbd_must-gather-5xj6j_3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8/copy/0.log" Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.831469 5045 scope.go:117] "RemoveContainer" containerID="ee8d848ca6a087bc1fda35bb09f5b6f7c002f5d4d4905a968124dd47d380b93b" Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.831621 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vcnbd/must-gather-5xj6j" Nov 26 00:11:49 crc kubenswrapper[5045]: I1126 00:11:49.873820 5045 scope.go:117] "RemoveContainer" containerID="177da48c7721c2b19db290d18d451f9c77259fa3079bf7b6cf00f16597ae1f5e" Nov 26 00:11:50 crc kubenswrapper[5045]: I1126 00:11:50.414363 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" path="/var/lib/kubelet/pods/3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8/volumes" Nov 26 00:11:59 crc kubenswrapper[5045]: I1126 00:11:59.396850 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:11:59 crc kubenswrapper[5045]: E1126 00:11:59.397791 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.736554 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w8g5x"] Nov 26 00:12:01 crc kubenswrapper[5045]: E1126 00:12:01.737571 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerName="copy" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.737595 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerName="copy" Nov 26 00:12:01 crc kubenswrapper[5045]: E1126 00:12:01.737631 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerName="gather" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.737643 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerName="gather" Nov 26 00:12:01 crc kubenswrapper[5045]: E1126 00:12:01.737678 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64ca8b55-c605-4872-bd75-4bdc6ec640ac" containerName="container-00" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.737694 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="64ca8b55-c605-4872-bd75-4bdc6ec640ac" containerName="container-00" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.738141 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerName="gather" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.738172 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="64ca8b55-c605-4872-bd75-4bdc6ec640ac" containerName="container-00" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.738204 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="3abfc9c2-a0a0-4c61-a30d-1cd6bed98da8" containerName="copy" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.740702 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.767780 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w8g5x"] Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.789347 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-utilities\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.789485 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-catalog-content\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.789646 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l2dt\" (UniqueName: \"kubernetes.io/projected/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-kube-api-access-6l2dt\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.890556 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l2dt\" (UniqueName: \"kubernetes.io/projected/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-kube-api-access-6l2dt\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.890696 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-utilities\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.890846 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-catalog-content\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.891491 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-catalog-content\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.891496 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-utilities\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:01 crc kubenswrapper[5045]: I1126 00:12:01.922647 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6l2dt\" (UniqueName: \"kubernetes.io/projected/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-kube-api-access-6l2dt\") pod \"redhat-operators-w8g5x\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:02 crc kubenswrapper[5045]: I1126 00:12:02.081816 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:02 crc kubenswrapper[5045]: I1126 00:12:02.574674 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w8g5x"] Nov 26 00:12:03 crc kubenswrapper[5045]: I1126 00:12:03.983279 5045 generic.go:334] "Generic (PLEG): container finished" podID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerID="32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6" exitCode=0 Nov 26 00:12:03 crc kubenswrapper[5045]: I1126 00:12:03.983810 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8g5x" event={"ID":"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8","Type":"ContainerDied","Data":"32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6"} Nov 26 00:12:03 crc kubenswrapper[5045]: I1126 00:12:03.983851 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8g5x" event={"ID":"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8","Type":"ContainerStarted","Data":"11d14aeb36ee52e8c75df16b0f5648b75bdcbfadfc93efba3a7f2e1846672033"} Nov 26 00:12:03 crc kubenswrapper[5045]: I1126 00:12:03.986435 5045 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 00:12:06 crc kubenswrapper[5045]: I1126 00:12:06.010041 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8g5x" event={"ID":"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8","Type":"ContainerStarted","Data":"101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843"} Nov 26 00:12:08 crc kubenswrapper[5045]: I1126 00:12:08.049706 5045 generic.go:334] "Generic (PLEG): container finished" podID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerID="101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843" exitCode=0 Nov 26 00:12:08 crc kubenswrapper[5045]: I1126 00:12:08.049772 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8g5x" event={"ID":"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8","Type":"ContainerDied","Data":"101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843"} Nov 26 00:12:09 crc kubenswrapper[5045]: I1126 00:12:09.064578 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8g5x" event={"ID":"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8","Type":"ContainerStarted","Data":"b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3"} Nov 26 00:12:09 crc kubenswrapper[5045]: I1126 00:12:09.091589 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w8g5x" podStartSLOduration=3.593428739 podStartE2EDuration="8.091560396s" podCreationTimestamp="2025-11-26 00:12:01 +0000 UTC" firstStartedPulling="2025-11-26 00:12:03.986069824 +0000 UTC m=+4380.343728946" lastFinishedPulling="2025-11-26 00:12:08.484201491 +0000 UTC m=+4384.841860603" observedRunningTime="2025-11-26 00:12:09.083302074 +0000 UTC m=+4385.440961226" watchObservedRunningTime="2025-11-26 00:12:09.091560396 +0000 UTC m=+4385.449219548" Nov 26 00:12:12 crc 
kubenswrapper[5045]: I1126 00:12:12.082867 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:12 crc kubenswrapper[5045]: I1126 00:12:12.083156 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:13 crc kubenswrapper[5045]: I1126 00:12:13.172936 5045 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w8g5x" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="registry-server" probeResult="failure" output=< Nov 26 00:12:13 crc kubenswrapper[5045]: timeout: failed to connect service ":50051" within 1s Nov 26 00:12:13 crc kubenswrapper[5045]: > Nov 26 00:12:14 crc kubenswrapper[5045]: I1126 00:12:14.407038 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:12:14 crc kubenswrapper[5045]: E1126 00:12:14.407620 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:12:22 crc kubenswrapper[5045]: I1126 00:12:22.154044 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:22 crc kubenswrapper[5045]: I1126 00:12:22.212907 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:22 crc kubenswrapper[5045]: I1126 00:12:22.416434 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w8g5x"] Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.205536 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w8g5x" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="registry-server" containerID="cri-o://b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3" gracePeriod=2 Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.777027 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.866761 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-utilities\") pod \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.866861 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-catalog-content\") pod \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.866908 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6l2dt\" (UniqueName: \"kubernetes.io/projected/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-kube-api-access-6l2dt\") pod \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\" (UID: \"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8\") " Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.867497 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-utilities" (OuterVolumeSpecName: "utilities") pod "ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" (UID: "ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.873266 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-kube-api-access-6l2dt" (OuterVolumeSpecName: "kube-api-access-6l2dt") pod "ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" (UID: "ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8"). InnerVolumeSpecName "kube-api-access-6l2dt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.970021 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.970080 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6l2dt\" (UniqueName: \"kubernetes.io/projected/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-kube-api-access-6l2dt\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:23 crc kubenswrapper[5045]: I1126 00:12:23.973936 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" (UID: "ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.072536 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.223893 5045 generic.go:334] "Generic (PLEG): container finished" podID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerID="b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3" exitCode=0 Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.223961 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w8g5x" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.223963 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8g5x" event={"ID":"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8","Type":"ContainerDied","Data":"b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3"} Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.224160 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8g5x" event={"ID":"ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8","Type":"ContainerDied","Data":"11d14aeb36ee52e8c75df16b0f5648b75bdcbfadfc93efba3a7f2e1846672033"} Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.224221 5045 scope.go:117] "RemoveContainer" containerID="b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.252303 5045 scope.go:117] "RemoveContainer" containerID="101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.290877 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w8g5x"] Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.309416 5045 scope.go:117] "RemoveContainer" containerID="32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.310964 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w8g5x"] Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.354384 5045 scope.go:117] "RemoveContainer" containerID="b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3" Nov 26 00:12:24 crc kubenswrapper[5045]: E1126 00:12:24.355263 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3\": container with ID starting with b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3 not found: ID does not exist" containerID="b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.355305 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3"} err="failed to get container status \"b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3\": rpc error: code = NotFound desc = could not find container \"b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3\": container with ID starting with b2017da9e552b212b2a05861032e700adf1ec06dd52b037ce3c9c6b74f70b2a3 not found: ID does not exist" Nov 26 00:12:24 crc 
kubenswrapper[5045]: I1126 00:12:24.355334 5045 scope.go:117] "RemoveContainer" containerID="101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843" Nov 26 00:12:24 crc kubenswrapper[5045]: E1126 00:12:24.355810 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843\": container with ID starting with 101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843 not found: ID does not exist" containerID="101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.355832 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843"} err="failed to get container status \"101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843\": rpc error: code = NotFound desc = could not find container \"101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843\": container with ID starting with 101c7e12f6b646228a1ee43e43805db59f96c21085e5e67dfa585cf82b230843 not found: ID does not exist" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.355849 5045 scope.go:117] "RemoveContainer" containerID="32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6" Nov 26 00:12:24 crc kubenswrapper[5045]: E1126 00:12:24.356162 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6\": container with ID starting with 32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6 not found: ID does not exist" containerID="32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.356188 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6"} err="failed to get container status \"32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6\": rpc error: code = NotFound desc = could not find container \"32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6\": container with ID starting with 32406a554cee308034f0ebc34c5625ad87bfdd4a32579309dfb0b059a56cabc6 not found: ID does not exist" Nov 26 00:12:24 crc kubenswrapper[5045]: I1126 00:12:24.409748 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" path="/var/lib/kubelet/pods/ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8/volumes" Nov 26 00:12:26 crc kubenswrapper[5045]: I1126 00:12:26.397762 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:12:26 crc kubenswrapper[5045]: E1126 00:12:26.398507 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:12:41 crc kubenswrapper[5045]: I1126 00:12:41.396988 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" 
Nov 26 00:12:41 crc kubenswrapper[5045]: E1126 00:12:41.398670 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:12:52 crc kubenswrapper[5045]: I1126 00:12:52.399160 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:12:52 crc kubenswrapper[5045]: E1126 00:12:52.400503 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:13:05 crc kubenswrapper[5045]: I1126 00:13:05.397474 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:13:05 crc kubenswrapper[5045]: E1126 00:13:05.398340 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:13:18 crc kubenswrapper[5045]: I1126 00:13:18.397412 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:13:18 crc kubenswrapper[5045]: E1126 00:13:18.398885 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:13:32 crc kubenswrapper[5045]: I1126 00:13:32.398425 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:13:32 crc kubenswrapper[5045]: E1126 00:13:32.399186 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:13:43 crc kubenswrapper[5045]: I1126 00:13:43.397174 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:13:43 crc kubenswrapper[5045]: E1126 00:13:43.398181 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:13:56 crc kubenswrapper[5045]: I1126 00:13:56.397342 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:13:56 crc kubenswrapper[5045]: E1126 00:13:56.398103 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:14:10 crc kubenswrapper[5045]: I1126 00:14:10.396945 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:14:10 crc kubenswrapper[5045]: E1126 00:14:10.400664 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.653505 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s4dls"]
Nov 26 00:14:12 crc kubenswrapper[5045]: E1126 00:14:12.654295 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="extract-utilities"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.654310 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="extract-utilities"
Nov 26 00:14:12 crc kubenswrapper[5045]: E1126 00:14:12.654327 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="extract-content"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.654336 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="extract-content"
Nov 26 00:14:12 crc kubenswrapper[5045]: E1126 00:14:12.654372 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="registry-server"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.654380 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="registry-server"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.654635 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea80a29f-ef7d-4e87-a69b-3736e9a4f3e8" containerName="registry-server"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.656269 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.670180 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s4dls"]
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.796056 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x955c\" (UniqueName: \"kubernetes.io/projected/7196e986-7413-4477-ae8c-2f1a601a320a-kube-api-access-x955c\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.796144 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-utilities\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.796412 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-catalog-content\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.897907 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x955c\" (UniqueName: \"kubernetes.io/projected/7196e986-7413-4477-ae8c-2f1a601a320a-kube-api-access-x955c\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.897996 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-utilities\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.898070 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-catalog-content\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.898942 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-catalog-content\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.900474 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-utilities\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.921663 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x955c\" (UniqueName: \"kubernetes.io/projected/7196e986-7413-4477-ae8c-2f1a601a320a-kube-api-access-x955c\") pod \"community-operators-s4dls\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") " pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:12 crc kubenswrapper[5045]: I1126 00:14:12.985137 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:13 crc kubenswrapper[5045]: I1126 00:14:13.541200 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s4dls"]
Nov 26 00:14:13 crc kubenswrapper[5045]: W1126 00:14:13.545240 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7196e986_7413_4477_ae8c_2f1a601a320a.slice/crio-d8d4277e6a07443a1d1c54a62907890415b4b58ca5dc4d3f235c6d5a3ad37694 WatchSource:0}: Error finding container d8d4277e6a07443a1d1c54a62907890415b4b58ca5dc4d3f235c6d5a3ad37694: Status 404 returned error can't find the container with id d8d4277e6a07443a1d1c54a62907890415b4b58ca5dc4d3f235c6d5a3ad37694
Nov 26 00:14:13 crc kubenswrapper[5045]: I1126 00:14:13.945440 5045 generic.go:334] "Generic (PLEG): container finished" podID="7196e986-7413-4477-ae8c-2f1a601a320a" containerID="d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e" exitCode=0
Nov 26 00:14:13 crc kubenswrapper[5045]: I1126 00:14:13.945587 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s4dls" event={"ID":"7196e986-7413-4477-ae8c-2f1a601a320a","Type":"ContainerDied","Data":"d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e"}
Nov 26 00:14:13 crc kubenswrapper[5045]: I1126 00:14:13.945857 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s4dls" event={"ID":"7196e986-7413-4477-ae8c-2f1a601a320a","Type":"ContainerStarted","Data":"d8d4277e6a07443a1d1c54a62907890415b4b58ca5dc4d3f235c6d5a3ad37694"}
Nov 26 00:14:15 crc kubenswrapper[5045]: I1126 00:14:15.991436 5045 generic.go:334] "Generic (PLEG): container finished" podID="7196e986-7413-4477-ae8c-2f1a601a320a" containerID="d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b" exitCode=0
Nov 26 00:14:15 crc kubenswrapper[5045]: I1126 00:14:15.991652 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s4dls" event={"ID":"7196e986-7413-4477-ae8c-2f1a601a320a","Type":"ContainerDied","Data":"d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b"}
Nov 26 00:14:17 crc kubenswrapper[5045]: I1126 00:14:17.010688 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s4dls" event={"ID":"7196e986-7413-4477-ae8c-2f1a601a320a","Type":"ContainerStarted","Data":"bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62"}
Nov 26 00:14:17 crc kubenswrapper[5045]: I1126 00:14:17.049189 5045 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s4dls" podStartSLOduration=2.373653378 podStartE2EDuration="5.04916783s" podCreationTimestamp="2025-11-26 00:14:12 +0000 UTC" firstStartedPulling="2025-11-26 00:14:13.949495552 +0000 UTC m=+4510.307154704" lastFinishedPulling="2025-11-26 00:14:16.625010014 +0000 UTC m=+4512.982669156" observedRunningTime="2025-11-26 00:14:17.040367802 +0000 UTC m=+4513.398026924" watchObservedRunningTime="2025-11-26 00:14:17.04916783 +0000 UTC m=+4513.406826952"
Nov 26 00:14:21 crc kubenswrapper[5045]: I1126 00:14:21.396564 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:14:21 crc kubenswrapper[5045]: E1126 00:14:21.397315 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:14:22 crc kubenswrapper[5045]: I1126 00:14:22.985396 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:22 crc kubenswrapper[5045]: I1126 00:14:22.986093 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:23 crc kubenswrapper[5045]: I1126 00:14:23.058250 5045 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:23 crc kubenswrapper[5045]: I1126 00:14:23.139536 5045 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:23 crc kubenswrapper[5045]: I1126 00:14:23.310168 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s4dls"]
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.092980 5045 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-s4dls" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" containerName="registry-server" containerID="cri-o://bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62" gracePeriod=2
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.680924 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.795274 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x955c\" (UniqueName: \"kubernetes.io/projected/7196e986-7413-4477-ae8c-2f1a601a320a-kube-api-access-x955c\") pod \"7196e986-7413-4477-ae8c-2f1a601a320a\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") "
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.795347 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-utilities\") pod \"7196e986-7413-4477-ae8c-2f1a601a320a\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") "
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.795519 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-catalog-content\") pod \"7196e986-7413-4477-ae8c-2f1a601a320a\" (UID: \"7196e986-7413-4477-ae8c-2f1a601a320a\") "
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.797901 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-utilities" (OuterVolumeSpecName: "utilities") pod "7196e986-7413-4477-ae8c-2f1a601a320a" (UID: "7196e986-7413-4477-ae8c-2f1a601a320a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.804082 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7196e986-7413-4477-ae8c-2f1a601a320a-kube-api-access-x955c" (OuterVolumeSpecName: "kube-api-access-x955c") pod "7196e986-7413-4477-ae8c-2f1a601a320a" (UID: "7196e986-7413-4477-ae8c-2f1a601a320a"). InnerVolumeSpecName "kube-api-access-x955c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.887152 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7196e986-7413-4477-ae8c-2f1a601a320a" (UID: "7196e986-7413-4477-ae8c-2f1a601a320a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.899314 5045 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.899357 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x955c\" (UniqueName: \"kubernetes.io/projected/7196e986-7413-4477-ae8c-2f1a601a320a-kube-api-access-x955c\") on node \"crc\" DevicePath \"\""
Nov 26 00:14:25 crc kubenswrapper[5045]: I1126 00:14:25.899376 5045 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7196e986-7413-4477-ae8c-2f1a601a320a-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.103008 5045 generic.go:334] "Generic (PLEG): container finished" podID="7196e986-7413-4477-ae8c-2f1a601a320a" containerID="bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62" exitCode=0
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.103062 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s4dls" event={"ID":"7196e986-7413-4477-ae8c-2f1a601a320a","Type":"ContainerDied","Data":"bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62"}
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.103103 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s4dls" event={"ID":"7196e986-7413-4477-ae8c-2f1a601a320a","Type":"ContainerDied","Data":"d8d4277e6a07443a1d1c54a62907890415b4b58ca5dc4d3f235c6d5a3ad37694"}
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.103102 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s4dls"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.103179 5045 scope.go:117] "RemoveContainer" containerID="bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.130585 5045 scope.go:117] "RemoveContainer" containerID="d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.139730 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s4dls"]
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.152052 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-s4dls"]
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.153850 5045 scope.go:117] "RemoveContainer" containerID="d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.194954 5045 scope.go:117] "RemoveContainer" containerID="bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62"
Nov 26 00:14:26 crc kubenswrapper[5045]: E1126 00:14:26.195447 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62\": container with ID starting with bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62 not found: ID does not exist" containerID="bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.195494 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62"} err="failed to get container status \"bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62\": rpc error: code = NotFound desc = could not find container \"bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62\": container with ID starting with bac60a68dfc4a18a6ab64fc24b9ef7aac198e7441733f309da45f59fc7093f62 not found: ID does not exist"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.195520 5045 scope.go:117] "RemoveContainer" containerID="d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b"
Nov 26 00:14:26 crc kubenswrapper[5045]: E1126 00:14:26.195976 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b\": container with ID starting with d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b not found: ID does not exist" containerID="d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.196007 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b"} err="failed to get container status \"d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b\": rpc error: code = NotFound desc = could not find container \"d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b\": container with ID starting with d708eb6af3c70536fa3220147eaa52db98e9f06947531214964cec699bd6bc6b not found: ID does not exist"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.196029 5045 scope.go:117] "RemoveContainer" containerID="d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e"
Nov 26 00:14:26 crc kubenswrapper[5045]: E1126 00:14:26.196235 5045 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e\": container with ID starting with d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e not found: ID does not exist" containerID="d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.196261 5045 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e"} err="failed to get container status \"d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e\": rpc error: code = NotFound desc = could not find container \"d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e\": container with ID starting with d473372ef92ef9a9d522629e045446e6456dceb31f4db0a1927580ebb589325e not found: ID does not exist"
Nov 26 00:14:26 crc kubenswrapper[5045]: I1126 00:14:26.412582 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" path="/var/lib/kubelet/pods/7196e986-7413-4477-ae8c-2f1a601a320a/volumes"
Nov 26 00:14:34 crc kubenswrapper[5045]: I1126 00:14:34.408878 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:14:34 crc kubenswrapper[5045]: E1126 00:14:34.410347 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:14:45 crc kubenswrapper[5045]: I1126 00:14:45.397063 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:14:45 crc kubenswrapper[5045]: E1126 00:14:45.397927 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.247434 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh"]
Nov 26 00:15:00 crc kubenswrapper[5045]: E1126 00:15:00.248958 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" containerName="extract-utilities"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.248996 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" containerName="extract-utilities"
Nov 26 00:15:00 crc kubenswrapper[5045]: E1126 00:15:00.249028 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" containerName="registry-server"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.249042 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" containerName="registry-server"
Nov 26 00:15:00 crc kubenswrapper[5045]: E1126 00:15:00.249074 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" containerName="extract-content"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.249087 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" containerName="extract-content"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.249829 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="7196e986-7413-4477-ae8c-2f1a601a320a" containerName="registry-server"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.250825 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.252817 5045 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.253224 5045 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.265600 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh"]
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.397042 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7"
Nov 26 00:15:00 crc kubenswrapper[5045]: E1126 00:15:00.397419 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.414646 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c9ee96-a44f-47d9-8eba-1772aaa1399b-config-volume\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.414871 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c9ee96-a44f-47d9-8eba-1772aaa1399b-secret-volume\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh"
Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.415042 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq8rw\" (UniqueName: \"kubernetes.io/projected/54c9ee96-a44f-47d9-8eba-1772aaa1399b-kube-api-access-cq8rw\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") "
pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.516502 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c9ee96-a44f-47d9-8eba-1772aaa1399b-config-volume\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.516597 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c9ee96-a44f-47d9-8eba-1772aaa1399b-secret-volume\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.516671 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq8rw\" (UniqueName: \"kubernetes.io/projected/54c9ee96-a44f-47d9-8eba-1772aaa1399b-kube-api-access-cq8rw\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.518207 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c9ee96-a44f-47d9-8eba-1772aaa1399b-config-volume\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.527482 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c9ee96-a44f-47d9-8eba-1772aaa1399b-secret-volume\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.537993 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq8rw\" (UniqueName: \"kubernetes.io/projected/54c9ee96-a44f-47d9-8eba-1772aaa1399b-kube-api-access-cq8rw\") pod \"collect-profiles-29401935-4l5bh\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:00 crc kubenswrapper[5045]: I1126 00:15:00.576887 5045 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:01 crc kubenswrapper[5045]: I1126 00:15:01.072213 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh"] Nov 26 00:15:01 crc kubenswrapper[5045]: W1126 00:15:01.080268 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54c9ee96_a44f_47d9_8eba_1772aaa1399b.slice/crio-66ff65fd183c73604fe0cddaa9c8102685ba7df13043571968f9e905d7411154 WatchSource:0}: Error finding container 66ff65fd183c73604fe0cddaa9c8102685ba7df13043571968f9e905d7411154: Status 404 returned error can't find the container with id 66ff65fd183c73604fe0cddaa9c8102685ba7df13043571968f9e905d7411154 Nov 26 00:15:01 crc kubenswrapper[5045]: I1126 00:15:01.476808 5045 generic.go:334] "Generic (PLEG): container finished" podID="54c9ee96-a44f-47d9-8eba-1772aaa1399b" containerID="e858818a20e6b786677a8b365660b74fbe4770649db71833fd7e2bfa2bbe15f6" exitCode=0 Nov 26 00:15:01 crc kubenswrapper[5045]: I1126 00:15:01.476848 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" event={"ID":"54c9ee96-a44f-47d9-8eba-1772aaa1399b","Type":"ContainerDied","Data":"e858818a20e6b786677a8b365660b74fbe4770649db71833fd7e2bfa2bbe15f6"} Nov 26 00:15:01 crc kubenswrapper[5045]: I1126 00:15:01.476874 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" event={"ID":"54c9ee96-a44f-47d9-8eba-1772aaa1399b","Type":"ContainerStarted","Data":"66ff65fd183c73604fe0cddaa9c8102685ba7df13043571968f9e905d7411154"} Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.867186 5045 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.876250 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c9ee96-a44f-47d9-8eba-1772aaa1399b-config-volume\") pod \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.876565 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c9ee96-a44f-47d9-8eba-1772aaa1399b-secret-volume\") pod \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.876697 5045 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq8rw\" (UniqueName: \"kubernetes.io/projected/54c9ee96-a44f-47d9-8eba-1772aaa1399b-kube-api-access-cq8rw\") pod \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\" (UID: \"54c9ee96-a44f-47d9-8eba-1772aaa1399b\") " Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.876919 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54c9ee96-a44f-47d9-8eba-1772aaa1399b-config-volume" (OuterVolumeSpecName: "config-volume") pod "54c9ee96-a44f-47d9-8eba-1772aaa1399b" (UID: "54c9ee96-a44f-47d9-8eba-1772aaa1399b"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.877414 5045 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c9ee96-a44f-47d9-8eba-1772aaa1399b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.884619 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c9ee96-a44f-47d9-8eba-1772aaa1399b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "54c9ee96-a44f-47d9-8eba-1772aaa1399b" (UID: "54c9ee96-a44f-47d9-8eba-1772aaa1399b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.885974 5045 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54c9ee96-a44f-47d9-8eba-1772aaa1399b-kube-api-access-cq8rw" (OuterVolumeSpecName: "kube-api-access-cq8rw") pod "54c9ee96-a44f-47d9-8eba-1772aaa1399b" (UID: "54c9ee96-a44f-47d9-8eba-1772aaa1399b"). InnerVolumeSpecName "kube-api-access-cq8rw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.979265 5045 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c9ee96-a44f-47d9-8eba-1772aaa1399b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:02 crc kubenswrapper[5045]: I1126 00:15:02.979680 5045 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq8rw\" (UniqueName: \"kubernetes.io/projected/54c9ee96-a44f-47d9-8eba-1772aaa1399b-kube-api-access-cq8rw\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:03 crc kubenswrapper[5045]: I1126 00:15:03.506424 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" event={"ID":"54c9ee96-a44f-47d9-8eba-1772aaa1399b","Type":"ContainerDied","Data":"66ff65fd183c73604fe0cddaa9c8102685ba7df13043571968f9e905d7411154"} Nov 26 00:15:03 crc kubenswrapper[5045]: I1126 00:15:03.506540 5045 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66ff65fd183c73604fe0cddaa9c8102685ba7df13043571968f9e905d7411154" Nov 26 00:15:03 crc kubenswrapper[5045]: I1126 00:15:03.506567 5045 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-4l5bh" Nov 26 00:15:03 crc kubenswrapper[5045]: I1126 00:15:03.962241 5045 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd"] Nov 26 00:15:03 crc kubenswrapper[5045]: I1126 00:15:03.971664 5045 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401890-bsptd"] Nov 26 00:15:04 crc kubenswrapper[5045]: I1126 00:15:04.423185 5045 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc" path="/var/lib/kubelet/pods/a2cd6d72-6e2a-433a-b0f7-dd5fa0a597cc/volumes" Nov 26 00:15:11 crc kubenswrapper[5045]: I1126 00:15:11.396585 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:15:11 crc kubenswrapper[5045]: E1126 00:15:11.397773 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:15:22 crc kubenswrapper[5045]: I1126 00:15:22.397169 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:15:22 crc kubenswrapper[5045]: E1126 00:15:22.398107 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:15:23 crc kubenswrapper[5045]: I1126 00:15:23.259012 5045 scope.go:117] "RemoveContainer" containerID="65843bade3fbaae09606c09fb4c538518d062b2177c6e9a847204522e5be4601" Nov 26 00:15:26 crc kubenswrapper[5045]: I1126 00:15:26.769933 5045 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="981d9260-fe05-4c33-9c46-c65a7a31c7b1" containerName="galera" probeResult="failure" output="command timed out" Nov 26 00:15:35 crc kubenswrapper[5045]: I1126 00:15:35.397204 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:15:35 crc kubenswrapper[5045]: E1126 00:15:35.398564 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:15:49 crc kubenswrapper[5045]: I1126 00:15:49.396683 5045 scope.go:117] "RemoveContainer" containerID="04ed3b9b26a41dda4dece865ea38650b6459f150a96a6a3ea26d034ea1b850a7" Nov 26 00:15:49 crc kubenswrapper[5045]: E1126 00:15:49.397908 5045 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7dpm4_openshift-machine-config-operator(bc394db7-8b38-4abe-841d-83a3ea3d07b0)\"" pod="openshift-machine-config-operator/machine-config-daemon-7dpm4" podUID="bc394db7-8b38-4abe-841d-83a3ea3d07b0" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.172311 5045 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4hmng"] Nov 26 00:15:52 crc kubenswrapper[5045]: E1126 00:15:52.175799 5045 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c9ee96-a44f-47d9-8eba-1772aaa1399b" containerName="collect-profiles" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.175830 5045 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c9ee96-a44f-47d9-8eba-1772aaa1399b" containerName="collect-profiles" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.176161 5045 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c9ee96-a44f-47d9-8eba-1772aaa1399b" containerName="collect-profiles" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.178151 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.187269 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4hmng"] Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.279079 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d316127-ad99-49e6-957b-db812e6834f7-utilities\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.279135 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxfz8\" (UniqueName: \"kubernetes.io/projected/0d316127-ad99-49e6-957b-db812e6834f7-kube-api-access-mxfz8\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.279435 5045 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d316127-ad99-49e6-957b-db812e6834f7-catalog-content\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.380419 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d316127-ad99-49e6-957b-db812e6834f7-catalog-content\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.380518 5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d316127-ad99-49e6-957b-db812e6834f7-utilities\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.380554 
5045 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxfz8\" (UniqueName: \"kubernetes.io/projected/0d316127-ad99-49e6-957b-db812e6834f7-kube-api-access-mxfz8\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.380938 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d316127-ad99-49e6-957b-db812e6834f7-catalog-content\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.381175 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d316127-ad99-49e6-957b-db812e6834f7-utilities\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.410508 5045 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxfz8\" (UniqueName: \"kubernetes.io/projected/0d316127-ad99-49e6-957b-db812e6834f7-kube-api-access-mxfz8\") pod \"redhat-marketplace-4hmng\" (UID: \"0d316127-ad99-49e6-957b-db812e6834f7\") " pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:52 crc kubenswrapper[5045]: I1126 00:15:52.523035 5045 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4hmng" Nov 26 00:15:53 crc kubenswrapper[5045]: I1126 00:15:53.061489 5045 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4hmng"] Nov 26 00:15:53 crc kubenswrapper[5045]: W1126 00:15:53.079164 5045 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d316127_ad99_49e6_957b_db812e6834f7.slice/crio-c7243243c0a9bd462f5ac7fe49985732208509375d65b6a93ad22324536ea176 WatchSource:0}: Error finding container c7243243c0a9bd462f5ac7fe49985732208509375d65b6a93ad22324536ea176: Status 404 returned error can't find the container with id c7243243c0a9bd462f5ac7fe49985732208509375d65b6a93ad22324536ea176 Nov 26 00:15:54 crc kubenswrapper[5045]: I1126 00:15:54.102005 5045 generic.go:334] "Generic (PLEG): container finished" podID="0d316127-ad99-49e6-957b-db812e6834f7" containerID="fa802bbb00bbb658cc29364f7848cb557b073652cefe8e2c2313c7cfee1b7cf1" exitCode=0 Nov 26 00:15:54 crc kubenswrapper[5045]: I1126 00:15:54.102842 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4hmng" event={"ID":"0d316127-ad99-49e6-957b-db812e6834f7","Type":"ContainerDied","Data":"fa802bbb00bbb658cc29364f7848cb557b073652cefe8e2c2313c7cfee1b7cf1"} Nov 26 00:15:54 crc kubenswrapper[5045]: I1126 00:15:54.102906 5045 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4hmng" event={"ID":"0d316127-ad99-49e6-957b-db812e6834f7","Type":"ContainerStarted","Data":"c7243243c0a9bd462f5ac7fe49985732208509375d65b6a93ad22324536ea176"} var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111443301024436 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111443301017353 5ustar 